From 4578a3d51d046682bafee61867800d06fcbf0520 Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Fri, 6 Oct 2023 10:16:20 +0200 Subject: [PATCH 01/23] using the newest version of segmentation and diarization --- tutorials/intro.ipynb | 7977 +++++++++++++++++++++-------------------- 1 file changed, 3994 insertions(+), 3983 deletions(-) diff --git a/tutorials/intro.ipynb b/tutorials/intro.ipynb index 3793bfe09..7082d76c9 100644 --- a/tutorials/intro.ipynb +++ b/tutorials/intro.ipynb @@ -1,4066 +1,4077 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "9-KmdPlBYnp6" - }, - "source": [ - "\"Open" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "9-KmdPlBYnp6" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1Fs2d8otYnp7" + }, + "source": [ + "[`pyannote.audio`](https://github.com/pyannote/pyannote-audio) is an open-source toolkit written in Python for **speaker diarization**. \n", + "\n", + "Based on [`PyTorch`](https://pytorch.org) machine learning framework, it provides a set of trainable end-to-end neural building blocks that can be combined and jointly optimized to build speaker diarization pipelines. \n", + "\n", + "`pyannote.audio` also comes with pretrained [models](https://huggingface.co/models?other=pyannote-audio-model) and [pipelines](https://huggingface.co/models?other=pyannote-audio-pipeline) covering a wide range of domains for voice activity detection, speaker segmentation, overlapped speech detection, speaker embedding reaching state-of-the-art performance for most of them. \n", + "\n", + "**This notebook will teach you how to apply those pretrained pipelines on your own data.**\n", + "\n", + "Make sure you run it using a GPU (or it might otherwise be slow...)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tckHJKZnYnp7" + }, + "source": [ + "## Installation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, - { - "cell_type": "markdown", - "metadata": { - "id": "1Fs2d8otYnp7" - }, - "source": [ - "[`pyannote.audio`](https://github.com/pyannote/pyannote-audio) is an open-source toolkit written in Python for **speaker diarization**. \n", - "\n", - "Based on [`PyTorch`](https://pytorch.org) machine learning framework, it provides a set of trainable end-to-end neural building blocks that can be combined and jointly optimized to build speaker diarization pipelines. \n", - "\n", - "`pyannote.audio` also comes with pretrained [models](https://huggingface.co/models?other=pyannote-audio-model) and [pipelines](https://huggingface.co/models?other=pyannote-audio-pipeline) covering a wide range of domains for voice activity detection, speaker segmentation, overlapped speech detection, speaker embedding reaching state-of-the-art performance for most of them. 
\n", - "\n", - "**This notebook will teach you how to apply those pretrained pipelines on your own data.**\n", - "\n", - "Make sure you run it using a GPU (or it might otherwise be slow...)" - ] + "id": "ai082p4HYnp7", + "outputId": "bb673846-8b58-4743-cea2-6c6270632d7f", + "vscode": { + "languageId": "python" + } + }, + "outputs": [], + "source": [ + "!pip install -qq https://github.com/pyannote/pyannote-audio/archive/refs/heads/develop.zip\n", + "!pip install -qq ipython==7.34.0" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qggK-7VBYnp8" + }, + "source": [ + "# Visualization with `pyannote.core`\n", + "\n", + "For the purpose of this notebook, we will download and use an audio file coming from the [AMI corpus](http://groups.inf.ed.ac.uk/ami/corpus/), which contains a conversation between 4 people in a meeting room." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "uJWoQiJgYnp8", + "vscode": { + "languageId": "python" + } + }, + "outputs": [], + "source": [ + "!wget -q http://groups.inf.ed.ac.uk/ami/AMICorpusMirror/amicorpus/ES2004a/audio/ES2004a.Mix-Headset.wav\n", + "DEMO_FILE = {'uri': 'ES2004a.Mix-Headset', 'audio': 'ES2004a.Mix-Headset.wav'}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "EPIapoCJYnp8" + }, + "source": [ + "Because AMI is a benchmarking dataset, it comes with manual annotations (a.k.a *groundtruth*). \n", + "Let us load and visualize the expected output of the speaker diarization pipeline.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Mmm0Q22JYnp8", + "vscode": { + "languageId": "python" + } + }, + "outputs": [], + "source": [ + "!wget -q https://raw.githubusercontent.com/pyannote/AMI-diarization-setup/main/only_words/rttms/test/ES2004a.rttm" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 233 }, + "id": "ToqCwl_FYnp9", + "outputId": "a1d9631f-b198-44d1-ff6d-ec304125a9f4", + "vscode": { + "languageId": "python" + } + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "tckHJKZnYnp7" - }, - "source": [ - "## Installation" + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABiYAAADyCAYAAADJJ33UAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nOzdeXxU1f3/8XfWyWTfSAIYdtmhIFh+4Nq6l0dbv92+ttS131b7xa2LtdYNtSp1rWIrgrsVqIq461dUXJBFRNljAoRNIAlknewhub8/6Iwzk9mXO0l4PR8PHiT3nnvuuWf53HPnwJ04wzAMAQAAAAAAAAAAmCA+1gUAAAAAAAAAAADHDhYmAAAAAAAAAACAaViYAAAAAAAAAAAApmFhAgAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGlYmAAAAAAAAAAAAKZhYQIAAAAAAAAAAJiGhQkAAAAAAAAAAGAaFiYAAAAAAAAAAIBpWJgAAAAAAAAAAACmYWECAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAaViYAAAAAAAAAAAApmFhAgAAAAAAAAAAmOaYXJi45JJLFBcX1+3Pjh07vO4799xzHccPGTLEY5q5c+c60uzdu1czZ85UamqqCgoKdN111+nIkSOO/QcPHtQvfvELjRw5UvHx8br22mu7lfPll1/W1KlTlZ2drbS0NE2aNEnPPfdcdCunF+st7SpJdXV1mj17tvr37y+LxaKRI0fqrbfeil7l9HL29rviiiu67Zs9e7bi4uJ0ySWXuKTtKW1tt2TJEsXFxen8888Pszb6rki3syStWrVK3/ve95STk6OUlBRNmDBBDzzwgDo7O13S1dTUaNasWcrMzFR2drZ+9atfqbGx0bG/tbVVl1xyiSZMmKDExESP7fjhhx96LFNFRUUEagcAAAAAAKDvSIx0hrVN7ZHO0qectOSQjjv33HP11FNPuWzr16+f130Wi8Xl99tvv12//vWvXbZlZGRIkjo7OzVz5kwVFRVp1apVOnjwoC666CIlJSXprrvukiS1tbWpX79+uummm/Tggw96LGNubq5uvPFGjR49WsnJyXrjjTd06aWXqqCgQOecc05I1x2O+rZ6U8+XZckK+pje0K7t7e0666yzVFBQoJdeekkDBw7Unj17lJ2dHfT1RkpLfatp57JmpYR0XHFxsZYsWaIHH3xQVqtV0tEPixctWqRBgwa5pO0pbW23e/du/fGPf9Qpp5wS/IVHUGd1tannS8jLC/qYSLbzsmXL9LOf/UyXXnqpVqxYoezsbL333nv605/+pNWrV+uFF15QXFycJGnWrFk6ePCgli9fro6ODl166aX6zW9+o0WLFkk62iesVquuvvpqLV261Oc1lJaWKjMz0/F7QUFB0PUAAAAAAADQl0V8YeK8e1ZEOkuf1twW2gf0FotFRUVFQe+zy8jI8Jrm3Xff1bZt2/Tee++psLBQkyZN0h133KHrr79ec+bMUXJysoYMGaKHHnpIkvTkk096zOf00093+f2aa67RM888o5UrV8ZkYeLCt39h6vleO//NoI/pDe365JNPqqamRqtWrVJSUpKko/+CP5aevegl0851+au/DOm4E044QTt37tTLL7+sWbNmSTr6v4oGDRqkoUOHuqTtKW0tHf1Ae9asWbrtttv0ySefqK6uLpjLjqiKiZNMPd/A/fuCPiZS7dzU1KRf//rX+sEPfqAFCxY4tv/P//yPCgsL9YMf/EAvvPCC/vu//1slJSV65513tG7dOk2dOlWSNG/ePH3ve9/TfffdpwEDBigtLU2PPvqoJOnTTz/12Y4FBQUxXWgEAAAAAADo6Y7JVzlF2+rVqzVhwgQVFhY6tp1zzjlqaGjQ1q1bQ8rTMAy9//77Ki0t1amnnhqpoiIIkWrX1157TdOnT9fs2bNVWFio8ePH66677ur2ahl0d9lll7n8C/knn3xSl156acTPE8kxfPvtt6ugoEC/+tWvIl3MPisS7fzuu++qurpaf/zjH7vt+/73v6+RI0dq8eLFko62d3Z2tmNRQpLOPPNMxcfHa+3atUGXf9KkSerfv7/OOussffrpp0EfDwAAAAAA0NcdswsTb7zxhtLT0x1/fvrTn3rdl56e7nh9i93111/fLc0nn3wiSaqoqHD5QFOS4/dg3zVeX1+v9PR0JScna+bMmZo3b57OOuusUC75mNAb2rW8vFwvvfSSOjs79dZbb+nmm2/W/fffr7/+9a+hXvYx45e//KVWrlypPXv2aM+ePfr000/1y192/x8YPaWtV65cqSeeeEILFy4M9lKPaZFo57KyMknSmDFjPJ5j9OjRjjQVFRXdXreUmJio3NzcoNq7f//+mj9/vpYuXaqlS5equLhYp59+ur744ouA8wAAAAAAADgWRPxVTr3Fd77zHcdrOSQpLS3N6z7p6Pc9OLvuuuscX8JqN3DgwIiXMyMjQxs2bFBjY6Pef/99/f73v9ewYcO6veYJR/WGdu3q6lJBQYEWLFighIQETZkyRfv379e9996rW2+9NaLn6mv69eunmTNn6umnn5ZhGJo5c6by8/O7pesJbW2z2XThhRdq4cKFHssI7yLZzoZhRLWszkaNGqVRo0Y5fp8xY4Z27typBx98UM8995xp5QAAAAAAAOjpIr4w8fafvhPpLKMiLS1NI0aMCHqfXX5+vtc0RUVF+uyzz1y2VVZWOvYFIz4+3nGeSZMmqaSkRHfffXdMFiaeO2+R6ecMVm9o1/79+yspKUkJCQmObWPGjFFFRYXa29uVnBzaF7qH46Jnf2L6OUN12WWX6corr5Qk/eMf//CYpie09c6dO7V79259//vfd2zr6uqSdPRf45eWlmr48OEB5RUpRZs2mHq+cITbziNHjpQklZSUaMaMGd32l5SUaOzYsZKOtmlVVZXL/iNHjqimpibomO3u29/+tlauXBlWHgAAAAAAAH1NxBcmctLM/1C1p5k+fbruvPNOVVVVOV4Psnz5cmVmZjo+CAtVV1eX2traIlHMoGVZsmJy3p4iUu160kknadGiRerq6lJ8/NG3qZWVlal///4xWZSQJGtWSkzOG4pzzz1X7e3tiouLi9qXwEeirUePHq3Nmze7bLvppptks9n00EMPqbi4OOLl9ichL8/0c4Yq3HY+++yzlZubq/vvv7/bwsRrr72m7du364477pB0tL3r6uq0fv16TZkyRZL0wQcfqKurS9OmTQvrOjZs2KD+/fuHlQcAAAAAAEBfc8y+ysmXtra2bu8VT0xMdHmViM1m65YmNTVVmZmZOvvsszV27FhdeOGFuueee1RRUaGbbrpJs2fPlsVicaTfsOHov15ubGzUoUOHtGHDBiUnJzs++Lz77rs1depUDR8+XG1tbXrrrbf03HPPdXt1CQLTU9r1t7/9rR555BFdc801uuqqq7R9+3bddddduvrqq6N16X1KQkKCSkpKHD970hPaOiUlRePHj3fJPzs7W5K6bUd34bZzWl
qaHnvsMV1wwQX6zW9+oyuvvFKZmZl6//33dd111+knP/mJfvazn0k6+j+Wzj33XP3617/W/Pnz1dHRoSuvvFIXXHCBBgwY4Mh727Ztam9vV01NjWw2m6P9J02aJEn6+9//rqFDh2rcuHFqbW3V448/rg8++EDvvvtuxOsHAAAAAACgVzOOQRdffLHxwx/+0Os+Sd3+jBo1ypFm8ODBHtNcfvnljjS7d+82zjvvPMNqtRr5+fnGH/7wB6Ojo8PlXJ7yGDx4sGP/jTfeaIwYMcJISUkxcnJyjOnTpxtLliyJbGX0Ib2lXQ3DMFatWmVMmzbNsFgsxrBhw4w777zTOHLkSOQqo4/x1baGYRg//OEPjYsvvtiRtie1dTDXcayLdDsbhmF8/PHHxjnnnGNkZmYaycnJxrhx44z77ruv23irrq42fv7znxvp6elGZmamcemllxo2m80ljbd+Y/e3v/3NGD58uJGSkmLk5uYap59+uvHBBx+EWSsAAAAAAAB9T5xhmPjNoAAAAAAAAAAA4JgWH+sCAAAAAAAAAACAYwcLEwAAAAAAAAAAwDQsTAAAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADANCxMAAAAAAAAAAMA0iaEe2NXVpQMHDigjI0NxcXGRLBMAAAAAAAAAAOhlDMOQzWbTgAEDFB/v/f9FhLwwceDAARUXF4d6OAAAAAAAAAAA6IP27dun4447zuv+kBcmMjIyHCfIzMwMNRsAAAAAAAAAANAHNDQ0qLi42LF+4E3ICxP21zdlZmayMAEAAAAAAAAAACTJ79c/8OXXAAAAAAAAAADANCxMAAAAAAAAAAAA07AwAQAAAAAAAAAATMPCBAAAAAAAAAAAMA0LEwAAAAAAAAAAwDQsTAAAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADANCxMAAAAAAAAAAMA0LEwAAAAAAAAAAADTsDABAAAAAAAAAABMw8IEAAAAAAAAAAAwDQsTAAAAAAAAAADANCxMAAAAAAAAAAAA07AwAQAAAAAAAAAATMPCBAAAAAAAAAAAMA0LEwAAAAAAAAAAwDQsTAAAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADBNwpw5c+aEcmBbW5vmzp2rP/zwfB359wtKHD5M8enpAR9/2Nam51ft1uD8NKVaEr1ua9+yVTuu+qNe6ipUZl6WXv58nwbnp6m5vdORtrm9U0+s3KjNtuUakj1ILUdatGz7yxqYcZzj57SkdL296y3H3/Z9/9q0TO990an15XUaUZjhOK83Na01WlTyvL6s+kJDsobImmj1mdZeDns6T9uC0VlZqcbHFihx+DAZTU2On73VffuWrar57WwljRunhIICv/l7aoNw2fPMSEl0tF+k8u4N5+8pAq0He7riqj1qvuZq1Q8brSVltqjUm3t/O7Buk57865MaVJiljIGFXtNF22Fbmxau2KG1Ow4HFBd6Ek9l91Z/9u2DuprUMe8htX70kZLGjFaNkeSS/rCtTS+9uV4F/1qoqk/W6CVbhgYPyPGbt70cuWnJ+tenu7rVZ6D3gc7KSjXc/4CjfJ7inT02xmVmau8zS/RijcVvGSPF/b6QXN3gKEvTc/9yidHOMTyQe2awMT/WnMvb0h6vTa9sU9bATHW0dGjLsyvVseRZffHCFh3Y0yprv3SVvLNdWQMzlWxNCij/pppmR572Yzxtc97eld+pN75+3XHfX7p+qepXNCu/ONfjeb3l58zTXMC9z7vHWU/9sLyuXPesm6thWcOVk5ITVF0Hyn7e5LYjemXBZyocnKOsrBSv152UluRol46WDpe68JTGvY68tdGnr6zRxx0rNChnUEDzH/f5krc5lXs71LTWeJ3b+ZuDedvv6zjnfc7zT0/zvpYjLY7yZluyHfNR5+tznqeGMk+MFnv5co/ka+uLpdr35QHlDs72Ow797Qu1HIHWT6Bt6q2dhmQN0b5DHbrpxY0aWZQhQ4rI/eSwrU1PrNyodbWvaXP1hm7PFIHWma+x5dynXt6+NKBnF2fu4+tg40HdsfIBrdmcps92HVBJ83saku19PO8/eEAvzX9VtVsbVDA0X8nWJK/3tJb2eK1fvFH7vjyguCJDL+5+wW95a1pr9PgXi/XMJ9s1qihPuame74v2urY/J0ZiXHkb955igCe+Yn+w9wWz58jhOmxr07+Wl6lp1T6lZiar5J3t6srv1Gvrn1DtP/+hjFHjZGnt1KGHH9e2r7qUPTSv270oUuUIdK4f7Tq259/Z2aW/vrJFI4sylJdh6TZnDHYO6czbZxie+pt9W15Kvt7c9KbWLdqkhm2NShqQ4JhPRfL+FMrnI4HMEdyv/dM9G7R2W6KGF2S7PB8E8xmBc1+w1FU7nk/qBw3X4g1VXud9rUaDFpU8r9UHVmnz4U1BxeJg6inc+21TTbM+eWmz3tqwXU0PPajmdZ/rleZMxzOVLzsOH9TtH87XpqptWl+a7KjnYDiPy7QjXT7nraHwVz+dlZXadd88Pb+iTEMGF6g10RLQc26sPmMKNzaVHWzQ3Cc/1PEvP624VSs99mNP56po2as7Vj6gjaUZGlWU7/hs1j6WfI0p93PaP3uw1+fA5ERtf6tUSWlJWvPGV3pnf52GFmUGPDaD/czP+ThPn1eEo6a1RktXzlfD/Q+oYsMqPdL+fxqWO1KGDK/zhnA+L7Y/A326RSGNv0AdtrXp/vc+0SOfvqPh/bKUHJfuEuucr+2lbS/q9YWv64YbbpDFYvGaZ9j/Y6Jjxw7ZHnhQnVVVQV/MEx/u1GFbm89tHWVlqirZqae32VRe1ejY75z2sK1N/163Ta/tekG1rTWqba3RktJFLj/vte1x+du+7+Wv3tWyzyq1ePUel/N6U9tao1d3LtOrO5eptrXGb1r7uXxtC0ZnVZWjvp1/9qajrEzta9aoo6wsoPw9tUG47Hk6t5+ZYn3+niLQerCna9haovY1a1RZWh61enPvb5Wl5VpccIIqS8t9pou2w7Y2LV69J+C40JN4Kru3+rNvr92zX00LFqppwUJ1VlV1S3/Y1qa339+orqef1IGX39CTn1cGlLe9HOVVjR7rM9D7QGdVlUv5PLHHw46yMn393L8DKmOkuN8XnMviHqMDiduerivQmB9rzmVsrm3R+iWb1VzboubaFpW++JkaX31LZYeyteW9vardV+/YHyjnPH1tc95eUV3hct9/Z/M72vbSDq/n9ZafM09zAfc+H0jf3mvbo63VW7TXtifgOgiWI/bvqpXW7lfFgQaP6ezX7dwu7nXhKY23fNzbaN3yL7V074sBz3/c50ve5lTu7eBrbudvDuZtv6/jPM05vc37nMvrPB91Tue+vaewl6+q8pA2vfqVNr/6VUDj0N++UMsRaj/ytt1bO9W21qi8qlFf7qlVeVVjxO4n9meXd/e97vGZItA68zW2nPtUoM8u7sc7H7fXtkclVfv09hd1em1jqeO5y5uqykPqWCltf3O34zq83dOaa1sc/aqiuiKg8ta21uitH
R/ri5Is7a6u9prO/TkxEryN+0D7p6/YH+x9wew5crgO29r06kflKlm2zXEvqaiu0OpNr2nE8x+p/uud6qyqUvVTi/XlG+Ue70WRKkegc/1o17E9/y1f1ztijdR9zhjOPNDbZxie+pt9W1ldqT7c9qG0KlHb39ztMp+KpFA+HwlkjuCc9tWdy/TWjo+1aOWBbs8HwXxG4NwXnJ9Pqr6u9Dnvs5fh//a8HXQs9nbNnoQ7VpprW7TmzTJ9+tlXGvnuUtW8/rbLM5Uvu2urVN7+oVbs+dSlnoPhMpf2M28Nhb/66ayq0oGX39DzOk5VX1cG/Jwbqxgcbmwqr2rU12V7lPz8M177sadz2ecDL6+pcfls1l4XvurE/Zz2zx7s9VlxoMHxrLHmzTI9u3ZvUGMz2PZwPi7Sn//UttZo9abXNPTlNdq/9n1ttZVpr22Pz3lDOJ8X25+BQh1/gTpsa9O720q1b/cwfXVoX7dY53xtS7e/GFCevMoJAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAacL+NgyjqSms420tHaptanf87Etz2xGPxztrbG/0eGxLR3NA6aKhsb1R9W31ET1vV119RPLxxrldIpFXtPLuDefvKQKtB2/jMBr15u1cjZ1yOZe/2IDAuLehv3q1p/eUzte+cMoVbn5G4zcxNpJlDLcs7rrq6tXp453YzulCPTYWPJW3rdF33GhrbFdLfWtA+fvKyz0f97Tu919v5/VX3mBEsm+Hq7X96Byqo6kjoOv2VQ8dzf6vxbl+nfNynhP54m2+5G1OZd8eyDzLWxn8HevpOE/HBDPv81buQOvJLIHOXz2Nq0iOKefyhNOP3PMJdn4e7pwo0HjgLz76GltmPOv4agf3Zy9nkX6OaWrt9NoeznUdqXHlbdzHUm95vnFuD4/3knqbujq/aaNoxI9QRauO/cUD+7wv2s//gYr0/SmcsRNKDPf2fBBI+wb67OQvbSh1GMw1BjO3dj/Ok0Dqxv2zulDGi3Od+Zu3hiKYeNLY3qXEIObusYjB0Xq28HQtvj4jcufpc9tgOd8fQhmbgbZHLJ/PfI3pcGNENPujc501t3muw2DjetgLEw1zblNGfOj/8eKqZz8POO09b5Z4Pd7+heU3r7rR47GPbvqny+/fpOsX8PlD5a1M4ai+4OcRz9NZMO3Sk/LuDefvKYKtBzPr7bpyq3TPCtPOd6wIp82HhplXoOcJV/0Nf5HyBkU835DL4kU4MTza8T/S3rzlfUlSpp/9kTqPN/Z7caZyInpeX2LdB509u3KXfiBp88OrtTmA9L7qZ+X8dcEfn3f0r3DnRN6Od93ue24XahkCPS6Y/AO7nt7DjHElRa5+Qs0nEmPb/uziS0D1GaGxFQpf58w8nKNTdK7HfZG+j9398kHdrYNe9/t7ToyEWI/ZnnS/8ec/XdbjvcTyP39UtSSlDpBkXkwJRKzquKfN+2Ld152FUhZv7RiJ9g00j2jXYVjjxtr9I8JArivRWqXc0cEd40uw89ZI+90nNdIngX8BcW+Kwf4Ecy2e0nr63DZYK+evc/TFUOq2N7SHrzgQeow4+gwU7etPSj/69xNvH5HU/VzBlp9XOQEAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADBN2N8xkTnnVhm33xHy8fMumqoRRRmSpB0VNp/vwvrTzDHd3lc276KpkqTfvfiWJOmOGXdK6v5Oq99O/F+X75mwp/vze38PueyBumPGnRqSdfQN7bvrd0XknYJ5SxZLit47J53bJVzu7RrJvHvD+XuKQOvB2ziMRr15O9e9w1o0/iff85sOwXFvQ3/1ak+/o8KmBx7Y43VfuG0TzH3An6y775Lumx/xMoZaFm/fM5G3ZLGSxo7xm0fHtpJucT7QY2PBU3ln3n6GJOmTPzzr8ZiZt5+hvCE5AeVfvbvW63tz3fNxT2u/79/72n0+z+vrHMGKZN8O10UnD1Vd2ZeacPV0TZ46sNt+9+u2t5unujj5ihP9fs+Ec/1W767VoodelOQ6J/LF23zJ25zKvn13/S6/cztvZfA3R/N0nKdjgpn3OZfbOV2g9WSWQOevnsZVJMeUXbj9yD2fYOfn4c6JdlTYHM8uvviLj77GVqSeOXzx1Q5bN23T5td3etwX6eeYG37UX6eOGO1xn3NdR2pceRv3Uuzev99bnm92VNg0Z/5qSZ7vJW2P36cB6QNVf9kfJPm+F5ktWnXsb35gn/d5mmPFQqTvT+HEqlBiuLfng0DaN9BnJ39pQ6nDYK4xmLm1s+rdtXrm7o+6bQ+kbj7ZtVmP7wjuGHfOdeZv3hqKYOYjD56Sq8ShwwKeu8ciBkfr2cLTtfj6jEhy/U4DT5/bBuvkK07UK8986bU87kL9zC+Wz2e+5g2hxgj7M1A0++OOCpt+//IuSdKvzkvUpH6TutXhN5+3/ymgPMNemIhLS5MRxvEZ1iTlpCU7fvYl1dK9uO7HpCenezzWmpQaULpoSE9OV5YlK6Lnjc/Oikg+3ji3SyTyilbeveH8PUWg9eBtHEaj3rydKz1BLufyFxsQGPc29Fev9vSe0vnaF065ws0vLv2bGBvJMoZbFnfx2VlKyMvzut+u00OsD/TYWPBUXku677hhSU+WNSsloPx95eWej3ta9/uvt/P6K28wItm3w5WSfHQOlZSWFNB1+6qHpFT/1+Jcv855Oc+JfPE2X/I2p7JvD2Se5a0M/o71dJynY4KZ93krd6D1ZJZA56+exlUkx5RzecLpR+75BDs/D3dOFGg88BcffY0tM551fLWD+7OXs0g/x6SlJHhtD+e6jtS48jbuY6m3PN84t4fHe0lWhuIzvmmjaMSPUEWrjv3FA/u8z9McKxYifX8KZ+yEEsO9PR8E0r6BPjv5SxtKHQZzjcHMrd2P8ySQunH/rC6U8eJcZ/7mraEIJp6kJ8crKYi5eyxicLSeLTxdi6/PiNx5+tw2WM73h1DGZqDtEcvnM19jOtwYEc3+6FxnqRbPdRhsXOdVTgAAAAAAAAAAwDQsTAAAAAAAAAAAANOwMAEAAAAAAAAAAEyTMGfOnDmhHNjW1qa5c+fqz3/6k1KPGyjL9OmK9/E+bU+syQmaMjTX5R1k3bZ1dqlj2zZln3qSThh7nHLTLY79zmkNGRo/sJ9OKJoka6JVKQlWTeg30fHzuLzxyrbkOP6275NhaEjWUE0elK//NyI/oPehGYY0OneMJheecDQPH5zL4WtbMOLS0o7Wd1raNz97q/vOLh3ZsUOpP/mxEgoKAsrfU7uEy5qcoMmDc1zaz0yxPn9PEWg9WJMT9K2BmUrcs0vW889X+oDCqNWbyzhua1f8yo/17bOmKWNgodd0ZjAMQxOKswOOCz2Jp7J7qz97n0hJilfylClKOe00xaend09vSCMK05Uyfqyyp07SlFFFfvO2l+Pbw/OUkpTgsT4Dug/8Jy/n8nkSl5Ymy7RpiktNVdbEcQGVMVLc7wv2siQU9OsWo/3GbU/XFWjM7wHsZYxLS1OSNVEDxhc63tGa3S9Z8Qnx
6j9jpI6bWqzUHKsGjC9UchDv97Tn6XyMp2327UVjC5SRnuG47xqGoeMLR2jQxOO8ntdbfs48zQWc+7ynOOveD7sMQ/ts+/Td4jOUkxL8lxQGypp8dPzts7Vq8klDlOXlXb32+rK3S5I1qVtduKfxVEce688wNHB4f32r/6SA5z/u8yVP8yePczIfczt/czBv+30d5z7n9DXvs5d3Ur/JrvPR/6TrNk/tQVISrBqXP04pCRYVjM5X8eQBAY1Df/tCKUcw9RNom3pqp8mFJ8iSkKLdh5r0vUkDlJdhidj9xJChEYXpGt9vnMdnioDrzMfYsvcpS0JKwM8ublm71MOu+nKNz5+o8QPzNWlQoeO5y5Muo0u76nZp+IQhGjplkOM6PN3T4tLSJBkqGJ2vgRP7KzElMaDytna0KCnR0Hnjxyg31ft90f05MRK8jftA+qev2B/KfWC0/i0AACAASURBVMHsOXK4DEljBueoeGKhUnOsKhpboM7EI+q0JmvQmecrNStPMiTr5PEaeEKxx3tRRMoRxFw/2nVsTU7QxOJsVdS3OmKN1H3OGM480NP90lN/s287deDpsiRaZBjS8ROHafCkYpf5VCSF8vlIIHMEO8OQhmQO0YTC0Zo2vNDl+SDYzwgcfSE54Zvnk1NOUVpOps9539EyDNX4/AlBx2Jv1+xJ+GPFkHVAphIS4pQ/9VvK//YJLs9U3nQZhrZXNGhi4UhN7j/GpZ6DOvt/xuUJQ3P9zltD4a9+OpuaZFWnvn3qt5SekxnQc24sP2MKJzZ1GYZ2VNh0wpAcZU7/ttd+7H6ulKR47aov14kDJumk4wc6Ppu1jyVfY6rbOf/zbG+vzxOH5Skzy6KisQVKSknUccfn68Tj+wU8NkP5zO+bz0M8f14RjtaOVkmGckZ/S9XD8vXdIWcrJyXH57whrM+LDUNj+o0IefwFqra5Ra1J23X+hEkakJnn0g+dyx93JE6vL3xdN9xwgywWi9f84gzDCOm7qxsaGpSVlaX6+nplZmaGfEEAAAAAAAAAAKD3C3TdgFc5AQAAAAAAAAAA07AwAQAAAAAAAAAATMPCBAAAAAAAAAAAMA0LEwAAAAAAAAAAwDQsTAAAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADANCxMAAAAAAAAAAMA0LEwAAAAAAAAAAADTsDABAAAAAAAAAABMw8IEAAAAAAAAAAAwDQsTAAAAAAAAAADANCxMAAAAAAAAAAAA07AwAQAAAAAAAAAATJMwZ86cOaEc2NbWprlz5+qGG26QxWKJcLFCU3awQTe9uFEjizKUl/FNmQ7b2vT8qt0anJ+mVEtit9/xjc7KSjU+tkBxmZlqeu5f3f5OHD5M8enpsS4mABO0b9mqmt/OVtK4cUooKOiV57PHNH+xK9B0kVTTWqPnP3te6xZt0sGvKrSic7k21mzUkKwhsiZaTSlDoGpaa7So5Hl9WfWFx/IdtrVp4YodWrvjsEYUZjjurZ2VlWq4/wG1fvSRbMOL9OqB/9PAjOO8Xl9Na42WbX/ZkcZbvr7E6p7fVNOsTa9sU9bATCVbk2Kejzt/bRhJznXe3N7Zrf7t15iUlqSSd7Z3u1Z/+0PVVNOs9Ys3at+XB5Q7OFsdLR0uvwdyDvc8nI9xn4d6SnuovFrv37tSuUOz1ZKY4LOePJ3b3jc6Wjq06ZVtOpKdohe++NrluGj2efcxGmmh5B9KnAjnuHC5j3FfY94eQ/d/uEov2TI0eECOSzk9tbVzH0vLSfV4znDt3VenuU99pg0VNo0cmNWt7rz1Qfv2jJREvfz5vpD7aCB9vLyuXI+8e6uGPPOetHKtksaMDuj+7q1f+Noe7XuMGeforKxU5dyH9PmiTTqwp1V5x/dTsjUprHtHeV257lk3V8OyhisnJcdlXyD36mjHm2DYy5KWlK6Xty8Nuj7K68p1/4r71PaedGhzjXIHZ6sxzubz+nYcrtBfli3X1n1tKshIDWvMRIK/fug8j5dhOOZ/7mOvqaZZXy5YoYb7H5C+XKukogJTPmfwNtd3395ZWalDDz+ujRva9dW2cq00PtKgnEE+29reP3KP5KvsjXKPsdZXHHaP2851XV3ZqCX/WK3CwTnKykrxeP5ozR0Dma8FO4/yFRfC4Ryjk1MP6eEN97mcI5LxJFJ5BVoXznE425LtiEHZlmy9veutiF3Tm2ueVu7jy2R7f402bZUOrilVynsvyTLq+IDGZmdlpfY9Pk9vJm7TcbnDekw9S9EbI9Hgraz2OKHCVD3/xX6t3XFYA5MTtW3plqDGoDNf93hf/dNfbLDHsJT4Ti145EG/6wZ96n9MlFc16ss9tSqvanTZftjWpic+3KnDtjaPv+MbnVVVsj3woDrKyjz+3VlVFesiAjBJR1mZ2tesUUdZWa89nz2m+YtdgaaLpNrWGn247UNpVaJKVpXprf1v6NWdy1TbWmNaGQJV21qjV3cu81q+w7Y2LV69R4tX73G5t3ZWValpwUI1LVio6spdWlK6yOf11bbWuKTxlq8vsbrnN9e2aP2SzWqubekR+bjz14aR5Fznnurffo21++o9Xqu//aFqrm3Rple/0uZXv1JzbUu330PJw5n7PNRT2tp99Tq4tUq1++r91pOnc9vrw/7z/oMN3Y6LZp93H6M9If9Q4kQ4x4XLfYz7GvP2GHrg5Tf05OeV3crpqa2d+5i3c4Zr/8EGraht0Utf7vdYd976oH17eVVjWH00kD6+17ZHFbs3K/GZF9S0YGHA93dv/cLX9mjfY8w4R2dVlWr//ZrKDmVry3t7v4lZYdw79tr2aGv1Fu217em2L5B7dbTjTTDsZdlr2xNSfey17dHu/Xu0952DjnuCv+vbXV2tDWWpWvZZZdhjJhL89UPnebzz/M997DXXtqj89S+Vteo1tT/7lGmfM3ib67tv76yqUvVTi7Xlvb3a8MlmLd37ot+2trdlVeUhr7HWVxx2j9vOdV1xoEFau//o315Ea+4YyHwt2HmUr7gQDucY/dXhXd3OEcl4Eqm8Aq0L5zjsHIP22vZE9JpWfPGCOp94TrX/fk1b3tur8te/VNujjwQ8NjurqnRgyVP699ev9ah6lqI3RqLBW1ntcWLP7lpHX6840BD0GHTJ08c93lf/9Bcb7DFs16GmgMrRpxYmAAAAAAAAAABAz8bCBAAAAAAAAAAAMM0x9QULtpYO1Ta1y9bSEeui9HhGY6PP3wGgN+mqq1dndbXP/TBHY3uj6ts813dje+TuNbG657c1tqulvjWs4/sKf3Xf0Rze/r4i2D7qqY/Y+3so+YXC1zgON99jhT1WBDPmndvZ/nso5wxXoGPTX3nd9weqJz7LhXotgebdVwVyr45WvAlGNGOTt+trOdLcbVs0+5k/0e6H/ubqkcg/kPN7SuevD7r3D0+xNpT5na2lQ81tRyRJHU0dXuN3tOeOfWk+Fol40tPmKtG+pkDHpvPY6an1HKl5UDT1pmdBf7Gh5T/xy59jamHiqmc/j3UReo36G/7i83cA6E2qL/h5rIuA/7h51Y2
mnCdW9/w3b3k/Juftify1wcr568La31cE21c99TGz+7tZ47gvCyVWhNvOkYpPh62J0ij/X1rqr7x96dmsL12LmQKpt74eb7xdX0dzP0mu89e+3M9iPVd3OX/qAJd9wfbBSMXaq579XHnNHfqBpM0Pr9bmiOQavL40H+uL8SRS11TsZXtQY7P46Jcn99R65jktsvzFhr+/WxpQPrzKCQAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGmOqe+YmHfRVI0oytCOCluffj9jJGTdfZfL90q4/w4AvUneksVKGjvG6/6ObSUxf7ftseKOGXdqSNZQj/t21++K2DtJY3XPn3n7Gcob4v/d695U767tM+8/nXfRVEne34l98hUn+nw3qb/9fYW/enI38/YzJLm+J9fe3yWZ0ud9jeNwRDIG9HT2WBHMmHduZyn4tg43Ptmt+3K/Xl+xw286f+V13x+onvgsF+q1BKInXm+kBHKvjla8CUY0Y5O36/t4R5nuLK1x2RbNfuZPtPuhv7l6uPzN9e3n79hWovrL/uCyz18fdO8fnmJtKPO7eRdNVfPX9frqgVWacPV0TZ460GO6aM8d+9J8LBLxpKfNVSJ1TQv2/s7jvkDHZse2Eu297rKIlinS9RypeVA09aZnQX+x4dqzR+nCv/nP55hamMiwJiknLVkZ1qRYF6XHi0tP9/k7APQm8dlZSsjL87q/MzvLxNIc29KT05Vl8Vzf6cmRu9fE6p5vSU+WNSslrOP7Cn91n5Qa3v6+Itg+6qmP2Pt7KPmFwtc4DjffY4U9VgQz5p3b2f57KOcMV6Bj01953fcHqic+y4V6LYHm3VcFcq+OVrwJRjRjk7frsyamSnJdmIhmP/Mn2v3Q31w9XP7m+vbze0rnrw+69w9PsTaU+V2GNUmyHP3ILiktyWv8jvbcsS/NxyIRT3raXCXa1xTo2HQeOz21niM1D4qm3vQs6C82WC2BLTnwKicAAAAAAAAAAGAaFiYAAAAAAAAAAIBpWJgAAAAAAAAAAACmYWECAAAAAAAAAACYJmHOnDlzQjmwra1Nc+fO1Q033CCLxRLhYoWmyzC0+1CTvjdpgPIyXMtkTU7QlKG5Sv3Pl2+4/45vxKWlyTJtmhIK+nX/e/p0xfNF2MCxobNLR3bsUOpPfqyEgoJee764tLSAYleg6SKp5UiLDEMaNmqwio4v0NiCcZpceIKsiVbTyhAow5BG547xWj7DMDShOFv/b0S+y73VMAwlT5kiyyknKzUzTxP6TfR5fSkJVpc03vL1JVb3/CRrogaML1RymF8QGal83Plrw0hyrnNP9Z9kTVTR2AKl5lg9Xqu//aEzVDA6X8WTByjJmuTye+Dn8HyM53moa9quri7Vfd2g478zVGk5qX7ryZ29byRZk5RkTVT/cYXKzEzpdlw0+7z7GO0J+YcSJ8I5LlzuY9zXmDcMQ4ljxih76iRNGVXUrZzube3ex7ydMxxGl6HDu2o0dUyhZowu8Fh33vqgNTlBkwfnKDfdElYf9dfHuwxDu+p3aUL+RGVMm6GU004L+P7u637mabsZ9xgzztHV1CTDkPrPGKnibw929JVQ7x1dhqF9tn36bvEZyknJ6bY/kHt1tONNMFISrBqXN16WhJSg6+NofyzXhPyJGjTuOMc9yNf1dalL5XW7NH34cZoxojDsMRMJPvuhyzy+n2P+52nsdbR2yDCkzO/MUOp3TjPtcwZvc/1u2w0pccxo5R3fT8WjBupb/Sf5beuUBKvG5Y9TRnq611jrLQ57itv2uk5JStD2miZNPmmIsnx8aW+05o7+52PBzaP8xYVw2GP0lKHZOtR6oNs5IhlPIpFXMHVhj8OT+k12xKBJ/SYr25ITuRhpSEOyhihl3AQlTZyovJH91G/iIKWeelLg909JmeMnacKAqT2mnu2iNUaiwVNZ7XFi+CmDZc1K0YTibJ04LE/W5PgQnmW+4e0e769/+osN1uQEjS1K1YJHHvS7bhBnGIYRdMklNTQ0KCsrS/X19crMzAwlCwAAAAAAAAAA0EcEum7Aq5wAAAAAAAAAAIBpWJgAAAAAAAAAAACmYWECAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAaViYAAAAAAAAAAAApmFhAgAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGlYmAAAAAAAAAAAAKZhYQIAAAAAAAAAAJiGhQkAAAAAAAAAAGAaFiYAAAAAAAAAAIBpWJgAAAA9VmdlpRruf0CdlZUuP/vTvmWrDv34p2rfstWEUsKbmtYaPbH5cT2x+XHVtNbEujh9UlNNsz5fvFGHyqv1+eKNaqppjllZDtva9NA7X+mhd77SYVtbzMrRW/iKaZ72BTKeDtvatHDFDo/172ufJ2UHG/TbJz9T2cGGAK8odOV15brhk+tVXlce9XMhcE01zVr95Oda9eTnQceWcI6Vjo6ButtuV91ttwd03wd6AuY96Cv89eWa1hotKnleNa01Lj/jG9GeFzvP64Kd40WT4/5999yA0rMwAQAAeqzOqirZHnhQnVVVLj/701FWpvY1a9RRVmZCKeFNbWuNXt25TK/uXKZaHlaiorm2ReuXbFbtvnqtX7JZzbUtMSvLYVubFq/eo8Wr9/SIB6OezldM87QvkPF02NamJz7c6XVhwts+T8qrGvXlnlqVVzUGeEWh22vbo63VW7TXtifq50LgmmtbtOnVr7T51a+Cji3hHCsdHQNNCxaqacHCgO77QE/AvAd9hb++XNtaoyWli1TbWuPyM74R7Xmx87wu2DleNNnv381PPxNQehYmAAAAAAAAAACAaViYAAAAAAAAAAAApmFhAgAAAAAAAAAAmCYx1gUAAADwp6uuPtZFAHq0juaOWBcBIeqqq1dndXW3beGwtXSotqm92zYAAIBIaWyP/vdQwbfePr9jYQIAAPR41Rf8PNZFAHq0lfPXxboICFE04ttVz34e8TwBAACc3bzqxlgX4ZjX2+d8vMoJAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAafiOCQAA0OPlLVksie+aALw5+YoT+Z6JXipvyWIljR3jsq1jW0lY8W7eRVM1oijDZduOCluvfw8xAADoOe6YcackvmsiluZdNFVS7/2uCRYmAABAjxefnRXrIgA9WlJqUqyLgBDFZ2cpIS/PZVtnmDEvw5qknLTkbtsAAAAiJT05PdZFOOb19vkdr3ICAAAAAAAAAACmYWECAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAaRLmzJkzJ5QD29raNHfuXN1www2yWCwRLhYAAMBRcWlpskyfrvi0tG9+TvfzRWudXTqyY4dSf/JjJRQUmFNQeGQY0ujcMZpceIKsidZYF6dPSrImqmhsgVJzrBowvlDJMfwSPMMwNKE4W/9vRL5SLYkxK0dv4SumedoXyHiyJidoytBcj/Xva5+7LsPQ7kNN+t6kAcrLiO7zXpdhaJ9tn75bfIZyUnKiei
4Ey1DB6HwVTx4QQmwJ59ij8SR5yhSlnHaa//s+0EMw70Ff4a8vpyRYNaHfRFkTrS4/4xvRnhc7z+uCmeNFm2EY6ho/Xn9fudLvukGcYRhGKCdpaGhQVlaW6uvrlZmZGXJhAQAAAAAAAABA7xfougGvcgIAAAAAAAAAAKZhYQIAAAAAAAAAAJiGhQkAAAAAAAAAAGAaFiYAAAAAAAAAAIBpWJgAAAAAAAAAAACmYWECAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAaViYAAAAAAAAAAAApmFhAgAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGlYmAAAAAAAAAAAAKZJjHUBAAAAAISms7JStvmPSZIyrrhcCYWFMS4RgL6C+AIA8KapplmbXtkmQ9K3zh+rtNzUWBcJvRALEwAAAEAv1VlVpaYFCyVJqT/6Lz44BBAxxBcAgDfNtS3a9OpXkqTjTxvKwgRCwqucAAAAAAAAAACAaViYAAAAAAAAAAAApmFhAgAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGlYmAAAAAAAAAAAAKZhYQIAAAAAAAAAAJiGhQkAAAAAAAAAAGAaFiYAAAAAAAAAAIBpWJgAAAAAAAAAAACmYWECAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAaViYAAAAAAAAAAAApkmMdQEAAAAAhCahoEBpv/m142cAiBTiCwDAm9Qcqyb+cLSM//wMhCLOMAwjlAMbGhqUlZWl+vp6ZWZmRrpcAAAAAAAAAACgFwl03YBXOQEAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADANCxMAAAAAAAAAAMA0LEwAAAAAAAAAAADTsDABAAAAAAAAAABMw8IEAAAAAAAAAAAwDQsTAAAAAAAAAADANCxMAAAAAAAAAAAA07AwAQAAAAAAAAAATMPCBAAAAAAAAAAAMA0LEwAAAAAAAAAAwDQsTAAAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADANCxMAAAAAAAAAAMA0LEwAAAAAAAAAAADTsDABAAAAAAAAAABMw8IEAAAAAAAAAAAwDQsTAAAAAAAAAADANImhHmgYhiSpoaEhYoUBAAAAAAAAAAC9k329wL5+4E3ICxM2m02SVFxcHGoWAAAAAAAAAACgj7HZbMrKyvK6P87wt3ThRVdXlw4cOKCMjAzFxcWFXEAAiKSGhgYVFxdr3759yszMjHVxAMCB+ASgJyI2AeipiE8AeiJik3+GYchms2nAgAGKj/f+TRIh/4+J+Ph4HXfccaEeDgBRlZmZyQ0CQI9EfALQExGbAPRUxCcAPRGxyTdf/1PCji+/BgAAAAAAAAAApmFhAgAAAAAAAAAAmCZhzpw5c2JdCACIpISEBJ1++ulKTAz5bXUAEBXEJwA9EbEJQE9FfALQExGbIiPkL78GAAAAAAAAAAAIFq9yAgAAAAAAAAAApmFhAgAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGlYmADQ491999068cQTlZGRoYKCAp1//vkqLS11SdPa2qrZs2crLy9P6enp+vGPf6zKykqXNHv37tXMmTOVmpqqgoICXXfddTpy5IiZlwKgD5s7d67i4uJ07bXXOrYRmwDEyv79+/XLX/5SeXl5slqtmjBhgj7//HPHfsMwdMstt6h///6yWq0688wztX37dpc8ampqNGvWLGVmZio7O1u/+tWv1NjYaPalAOhDOjs7dfPNN2vo0KGyWq0aPny47rjjDhmG4UhDfAIQbR9//LG+//3va8CAAYqLi9Mrr7zisj9ScWjTpk065ZRTlJKSouLiYt1zzz1Rv7behIUJAD3eRx99pNmzZ2vNmjVavny5Ojo6dPbZZ6upqcmR5ne/+51ef/11vfjii/roo4904MAB/ehHP3Ls7+zs1MyZM9Xe3q5Vq1bpmWee0dNPP61bbrklFpcEoI9Zt26dHnvsMU2cONFlO7EJQCzU1tbqpJNOUlJSkt5++21t27ZN999/v3Jychxp7rnnHj388MOaP3++1q5dq7S0NJ1zzjlqbW11pJk1a5a2bt2q5cuX64033tDHH3+s3/zmN7G4JAB9xN/+9jc9+uijeuSRR1RSUqK//e1vuueeezRv3jxHGuITgGhramrSt771Lf3jH//wuD8ScaihoUFnn322Bg8erPXr1+vee+/VnDlztGDBgqhfX69hAEAvU1VVZUgyPvroI8MwDKOurs5ISkoyXnzxRUeakpISQ5KxevVqwzAM46233jLi4+ONiooKR5pHH33UyMzMNNra2sy9AAB9is1mM44//nhj+fLlxmmnnWZcc801hmEQmwDEzvXXX2+cfPLJXvd3dXUZRUVFxr333uvYVldXZ1gsFmPx4sWGYRjGtm3bDEnGunXrHGnefvttIy4uzti/f3/0Cg+gT5s5c6Zx2WWXuWz70Y9+ZMyaNcswDOITAPNJMpYtW+b4PVJx6J///KeRk5Pj8lx3/fXXG6NGjYr2JfUa/I8JAL1OfX29JCk3N1eStH79enV0dOjMM890pBk9erQGDRqk1atXS5JWr16tCRMmqLCw0JHmnHPOUUNDg7Zu3Wpi6QH0NbNnz9bMmTNdYpBEbAIQO6+99pqmTp2qn/70pyooKNDkyZO1cOFCx/5du3apoqLCJT5lZWVp2rRpLvEpOztbU6dOdaQ588wzFR8fr7Vr15p3MQD6lBkzZuj9999XWVmZJGnjxo1auXKlzjvvPEnEJwCxF6k4tHr1ap166qlKTk52pDnnnHNUWlqq2tpak66mZ0uMdQEAIBhdXV269tprddJJJ2n8+PGSpIqKCiUnJys7O9slbWFhoSoqKhxpnD/4s++37wOAUCxZskRffPGF1q1b120fsQlArJSXl+vRRx/V73//e/3lL3/RunXrdPXVVys5OVkXX3yxI754ij/O8amgoMBlf2JionJzc4lPAEL25z//WQ0NDRo9erQSEhLU2dmpO++8U7NmzZIk4hOAmItUHKqoqNDQoUO75WHf5/yKzWMVCxMAepXZs2dry5YtWrlyZayLAuAYt2/fPl1zzTVavny5UlJSYl0cAHDo6urS1KlTddddd0mSJk+erC1btmj+/Pm6+OKLY1w6AMeyF154Qc8//7wWLVqkcePGacOGDbr22ms1YMAA4hMAHGN4lROAXuPKK6/UG2+8oRUrVui4445zbC8qKlJ7e7vq6upc0ldWVqqoqMiRprKystt++z4ACNb69etVVVWlE044QYmJiUpMTNRHH32khx9+WImJiSosLCQ2AYiJ/v37a+zYsS7bxowZo71790r6Jr54ij/O8amqqspl/5EjR1RTU0N8AhCy6667Tn/+8591wQUXaMKECbrwwgv1u9/9Tnfffbck4hOA2ItUHOJZzz8WJgD0eIZh6Morr9SyZcv0wQcfdPuvcFOmTFFSUpLef/99x7bS0lLt3btX06dPlyRNnz5dm
zdvdrlxLF++XJmZmd0e3AEgEGeccYY2b96sDRs2OP5MnTpVs2bNcvxMbAIQCyeddJJKS0tdtpWVlWnw4MGSpKFDh6qoqMglPjU0NGjt2rUu8amurk7r1693pPnggw/U1dWladOmmXAVAPqi5uZmxce7fhSVkJCgrq4uScQnALEXqTg0ffp0ffzxx+ro6HCkWb58uUaNGsVrnP4jYc6cOXNiXQgA8GX27Nl6/vnn9dJLL2nAgAFqbGxUY2OjEhISlJSUpJSUFB04cECPPPKIJk2apJqaGl1++eUqLi7WrbfeKkkaNmyYli5dqvfee08TJ07Uxo0bddVVV+mKK67QOeecE+MrBNAbWSwWFRQUuPxZtGiRhg0bposuuojYBCBmBg0apNtuu02JiYnq37+/3nnnHc2ZM0d33HGHJk6cqLi4OHV2duquu+7S2LFj1d7erquvvlrNzc2aN2+eEhMT1a9fP61du1aLFy/W5MmTtXv3bl1++eU6++yzdckll8T6EgH0UiUlJXrmmWc0atQoJScna8WKFfrLX/6iX/ziFzrrrLOITwBM0djYqG3btqmiokKPPfaYpk2bJqvVqvb2dmVnZ0ckDo0cOVKPPvqotm7dqpEjRzri3W233aYpU6bEtgJ6CgMAejhJHv889dRTjjQtLS3G//7v/xo5OTlGamqq8V//9V/GwYMHXfLZvXu3cd555xlWq9XIz883/vCHPxgdQbkO7AAABAxJREFUHR0mXw2Avuy0004zrrnmGsfvxCYAsfL6668b48ePNywWizF69GhjwYIFLvu7urqMm2++2SgsLDQsFotxxhlnGKWlpS5pqqurjZ///OdGenq6kZmZaVx66aWGzWYz8zIA9DENDQ3GNddcYwwaNMhISUkxhg0bZtx4441GW1ubIw3xCUC0rVixwuPnTBdffLFhGJGLQxs3bjROPvlkw2KxGAMHDjTmzp1r1iX2CnGGYRgxWhMBAAAAAAAAAADHGL5jAgAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGlYmAAAAAAAAAAAAKZhYQIAAAAAAAAAAJiGhQkAAAAAAAAAAGAaFiYAAAAAAAAAAIBpWJgAAAAA4Ncll1yi888/P9bFAAAAANAHJMa6AAAAAABiKy4uzuf+W2+9VQ899JAMwzCpRAAAAAD6MhYmAAAAgGPcwYMHHT//+9//1i233KLS0lLHtvT0dKWnp8eiaAAAAAD6IF7lBAAAABzjioqKHH+ysrIUFxfnsi09Pb3bq5xOP/10XXXVVbr22muVk5OjwsJCLVy4UE1NTbr00kuVkZGhESNG6O2333Y515YtW3TeeecpPT1dhYWFuvDCC3X48GGzLxkAAABADLEwAQAAACAkzzzzjPLz8/XZZ5/pqquu0m9/+1v99Kc/1YwZM/TFF1/o7LPP1oUXXqjm5mZJUl1dnb773e9q8uTJ+vzzz/XOO++osrJSP/vZz2J8JQAA/P/27h+lmSCO4/B3/QOWgoLEKk0kTZR4CLscwDJFKhvbQEgZsE/tDXKAVKmsrLQU9gKCVmnX2AnyvpXgKPg81e4Uy2/L4TMwAJQkTAAAAF9ydnaWyWSSTqeT8Xicvb29HB4eZjQapdPpZDqd5uXlJY+Pj0mS+Xyefr+f2WyWbrebfr+f29vbrFarPD09/fDfAAAApbhjAgAA+JLT09OP5+3t7RwcHKTX632sHR0dJUmen5+TJA8PD1mtVv+9r6Ku65ycnHzzxAAAwG8gTAAAAF+yu7v76b2qqk9rVVUlSd7e3pIk6/U6g8EgNzc3/3yr1Wp946QAAMBvIkwAAABFnJ+fZ7FYpN1uZ2fHVgQAAP4qd0wAAABFXF1d5fX1NZeXl7m/v09d11kulxkOh2ma5qfHAwAAChEmAACAIo6Pj3N3d5emaXJxcZFer5fr6+vs7+9na8vWBAAA/opqs9lsfnoIAAAAAADgb3AsCQAAAAAAKEaYAAAAAAAAihEmAAAAAACAYoQJAAAAAACgGGECAAAAAAAoRpgAAAAAAACKESYAAAAAAIBihAkAAAAAAKAYYQIAAAAAAChGmAAAAAAAAIoRJgAAAAAAgGKECQAAAAAAoJh3bv8p1u6sZCgAAAAASUVORK5CYII=", + "text/plain": [ + "" ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# load groundtruth\n", + "from pyannote.database.util import load_rttm\n", + "_, groundtruth = load_rttm('ES2004a.rttm').popitem()\n", + "\n", + "# visualize groundtruth\n", + "groundtruth" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "p_R9T9Y5Ynp9" + }, + "source": [ + "For the rest of this notebook, we will only listen to and visualize a one-minute long excerpt of the file (but will process the whole file anyway)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 230 }, + "id": "bAHza4Y1Ynp-", + "outputId": "c4cc2369-bfe4-4ac2-bb71-37602e7c7a8a", + "vscode": { + "languageId": "python" + } + }, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "ai082p4HYnp7", - "outputId": "bb673846-8b58-4743-cea2-6c6270632d7f", - "vscode": { - "languageId": "python" - } - }, - "outputs": [], - "source": [ - "!pip install -qq https://github.com/pyannote/pyannote-audio/archive/refs/heads/develop.zip\n", - "!pip install -qq ipython==7.34.0" + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nO3de3RV5Zk/8CeQCzEhCRcDwXJTRLygoHQs6kztVAHrEmtntFJEpY5WB7V2quO4vNE6WmlrqaOzdGS0Y50q01bbsVbrXSvC4JVaKwuRCrZKoCYSAkJAsn9/8OPUlEsSck5Odvh81spaZO/3vPvd593nOfucL9m7IEmSJAAAAAAAAFKsR74HAAAAAAAA0FECDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYHHDpx99tlRUFCw3c9bb72103WTJk3KPH7YsGE7bHPjjTdm2rzzzjtx4oknxl577RXV1dVx2WWXxUcffZRZv3LlyvjSl74UI0eOjB49esQll1yy3TgfeOCBGDduXFRVVUVZWVmMGTMm7rnnntw+OSmXlrmNiFizZk3MmDEjampqoqSkJEaOHBkPP/xw7p6cFNs2d+eff/5262bMmBEFBQVx9tlnt2jbVeZ5m7lz50ZBQUF8/vOf7+Cz0b1le64jIubPnx+f+9znok+fPtGrV68YPXp0fO9734stW7a0aFdfXx9Tp06NioqKqKqqinPOOSfWrVuXWb9x48Y4++yzY/To0VFYWLjDuXzmmWd2OKba2tosPDsAAAAAe7bCfGz0g/WbOnV7fcqK2/2YSZMmxQ9+8IMWy/bee++drispKWnx+ze/+c0499xzWyzr3bt3RERs2bIlTjzxxBg4cGDMnz8/Vq5cGWeeeWYUFRXFDTfcEBERTU1Nsffee8dVV10Vs2fP3uEY+/btG1deeWWMGjUqiouL46GHHorp06dHdXV1TJw4sd373FENTQ2dur3Kksrdelwa5nbTpk1x/PHHR3V1dfz0pz+NffbZJ1asWBFVVVW7tc8dtaFhY6dur7SyV7sfM3jw4Jg7d27Mnj07SktLI2LrF9D33ntvDBkypEXbrjLP2yxfvjwuvfTS+Ou//ut273c2bamr69Tt9ezXb7cel825/tnPfhannXZaTJ8+PZ5++umoqqqKJ554Iv75n/85FixYED/+8Y+joKAgIiKmTp0aK1eujMcffzw2b94c06dPj/POOy/uvffeiNh6XJSWlsbFF18c999//y73YcmSJVFRUZH5vbq6ereeCwAAAAD+LC+BxwnffrpTt/d/32j/l/8lJSUxcODAdq/bpnfv3jtt89hjj8Ubb7wRTzzxRAwYMCDGjBkT1113XVx++eUxc+bMKC4ujmHDhsXNN98cERF33XXXDvs59thjW/z+1a9+Ne6+++6YN29eXgKPaY98qVO39+Dnf7lbj0vD3N51111RX18f8+fPj6KioojY+lcH+fLDM3/aqdv7yv+e0e7HHH744bFs2bJ44IEHYurUqRGx9a+ghgwZEsOHD2/RtqvMc8TWL8mnTp0a3/jGN+K5556LNWvWtGe3s6r20DGdur193v3Dbj0uW3O9fv36OPfcc2Py5Mlxxx13ZJb/wz/8QwwYMCAmT54cP/7xj+OLX/xiLF68OH71q1/Fiy++GOPGjYuIiFtuuSU+97nPxXe/+90YNGhQlJWVxW233RYREc8///wu57K6ujpvASYAAABAd+WSVnmwYMGCGD16dAwYMCCzbOLEibF27dr43e9+t1t9JkkSTz75ZCxZsiT+5m/+JltDpZ2yNbcPPvhgjB8/PmbMmBEDBgyIQw45JG644YbtLrFDS1/+8pdb/G/+u+66K6ZPn5717WTzNfzNb34zqqur45xzzsn2MLu1bMz1Y489FnV1dXHppZdut+6kk06KkSNHxn333RcRW+e8qqoqE3ZERBx33HHRo0ePWLhwYbvHP2bMmKipqYnjjz8+nn/++XY/HgAAAIDtCTx24qGHHory8vLMz6mnnrrTdeXl5ZnL2Gxz+eWXb9fmueeei4iI2traFl+URkTm9/Zex72hoSHKy8ujuLg4TjzxxLjlllvi+OOP351d3mOkYW5///vfx09/+tPYsmVLPPzww3H11VfHTTfdFP/6r/+6u7u9RzjjjDNi3rx5sWLFilixYkU8//zzccYZ2/+1SFeZ53nz5sWdd94Zc+bMae+u7vGyMddvvvlmREQceOCBO9zGqFGjMm1qa2u3u+xUYWFh9O3bt11zXlNTE7fffnvcf//9cf/998fgwYPj2GOPjVdeeaXNfQAAAACwY3m5pFUafOYzn8lcmiQioqysbKfrIrbeT+PjLrvsssyNc7fZZ599sj7O3r17x6JFi2LdunXx5JNPxj/90z/Fvvvuu93lrvizNMxtc3NzVFdXxx133BE9e/aMI444It599934zne+E9dee21Wt9Wd7L333nHiiSfGf/3Xf0WSJHHiiSdG//79t2vXFea5sbExpk2bFnPmzNnhGNm1bM51kiQ5HevHHXDAAXHAAQdkfj/qqKNi2bJlMXv27Ljnnns6bRwAAAAA3VFeAo9H/vkz+dhsu5SVlcWIESPavW6b/v3777TNwIED44UXXmixbNWqVZl17dGjR4/MdsaM
... [base64-encoded PNG data elided: pyannote.core plot of the reference annotation cropped to the 600-660 s excerpt] ...
fiQt3ubxgAAAABJRU5ErkJggg==", + "text/plain": [ + "" ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from pyannote.core import Segment, notebook\n", + "# make notebook visualization zoom on 600s < t < 660s time range\n", + "EXCERPT = Segment(600, 660)\n", + "notebook.crop = EXCERPT\n", + "\n", + "# visualize excerpt groundtruth\n", + "groundtruth" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "L3FQXT5FYnp-" + }, + "source": [ + "This nice visualization is brought to you by [`pyannote.core`](http://pyannote.github.io/pyannote-core/) and basically indicates when each speaker speaks. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 62 }, + "id": "rDhZ3bXEYnp-", + "outputId": "a82efe4e-2f9c-48bd-94fb-c62af3a3cb43", + "vscode": { + "languageId": "python" + } + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "qggK-7VBYnp8" - }, - "source": [ - "# Visualization with `pyannote.core`\n", - "\n", - "For the purpose of this notebook, we will download and use an audio file coming from the [AMI corpus](http://groups.inf.ed.ac.uk/ami/corpus/), which contains a conversation between 4 people in a meeting room." + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" ] - }, + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from pyannote.audio import Audio \n", + "from IPython.display import Audio as IPythonAudio\n", + "waveform, sr = Audio(mono=\"downmix\").crop(DEMO_FILE, EXCERPT)\n", + "IPythonAudio(waveform.flatten(), rate=sr)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hkzox7QIYnp_" + }, + "source": [ + "# Processing your own audio file (optional)\n", + "\n", + "In case you just want to go ahead with the demo file, skip this section entirely.\n", + "\n", + "In case you want to try processing your own audio file, proceed with running this section. It will offer you to upload an audio file (preferably a `wav` file but all formats supported by [`SoundFile`](https://pysoundfile.readthedocs.io/en/latest/) should work just fine)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3hmFmLzFYnp_" + }, + "source": [ + "## Upload audio file" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "xC05jFO_Ynp_", + "outputId": "c5502632-56ae-4adb-8bdc-112deedc8893", + "vscode": { + "languageId": "python" + } + }, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "uJWoQiJgYnp8", - "vscode": { - "languageId": "python" - } - }, - "outputs": [], - "source": [ - "!wget -q http://groups.inf.ed.ac.uk/ami/AMICorpusMirror/amicorpus/ES2004a/audio/ES2004a.Mix-Headset.wav\n", - "DEMO_FILE = {'uri': 'ES2004a.Mix-Headset', 'audio': 'ES2004a.Mix-Headset.wav'}" + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " Upload widget is only available when the cell has been executed in the\n", + " current browser session. Please rerun this cell to enable.\n", + " \n", + " " + ], + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "cell_type": "markdown", - "metadata": { - "id": "EPIapoCJYnp8" - }, - "source": [ - "Because AMI is a benchmarking dataset, it comes with manual annotations (a.k.a *groundtruth*). 
\n", - "Let us load and visualize the expected output of the speaker diarization pipeline.\n" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Saving sample.wav to sample.wav\n" + ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Mmm0Q22JYnp8", - "vscode": { - "languageId": "python" - } - }, - "outputs": [], - "source": [ - "!wget -q https://raw.githubusercontent.com/pyannote/AMI-diarization-setup/main/only_words/rttms/test/ES2004a.rttm" + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" ] - }, + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import google.colab\n", + "own_file, _ = google.colab.files.upload().popitem()\n", + "OWN_FILE = {'audio': own_file}\n", + "notebook.reset()\n", + "\n", + "# load audio waveform and play it\n", + "waveform, sample_rate = Audio(mono=\"downmix\")(OWN_FILE)\n", + "IPythonAudio(data=waveform.squeeze(), rate=sample_rate, autoplay=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ctw4nLaPYnp_" + }, + "source": [ + "Simply replace `DEMO_FILE` by `OWN_FILE` in the rest of the notebook.\n", + "\n", + "Note, however, that unless you provide a groundtruth annotation in the next cell, you will (obviously) not be able to visualize groundtruth annotation nor evaluate the performance of the diarization pipeline quantitatively" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "x9AQgDzFYnp_" + }, + "source": [ + "## Upload groundtruth (optional)\n", + "\n", + "The groundtruth file is expected to use the RTTM format, with one line per speech turn with the following convention:\n", + "\n", + "```\n", + "SPEAKER {file_name} 1 {start_time} {duration} {speaker_name} \n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "iZaFudpDYnp_", + "outputId": "981274fa-e654-4091-c838-91c81f921e5d", + "vscode": { + "languageId": "python" + } + }, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 233 - }, - "id": "ToqCwl_FYnp9", - "outputId": "a1d9631f-b198-44d1-ff6d-ec304125a9f4", - "vscode": { - "languageId": "python" - } - }, - "outputs": [ - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABiYAAADyCAYAAADJJ33UAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nOzdeXxU1f3/8XfWyWTfSAIYdtmhIFh+4Nq6l0dbv92+ttS131b7xa2LtdYNtSp1rWIrgrsVqIq461dUXJBFRNljAoRNIAlknewhub8/6Iwzk9mXO0l4PR8PHiT3nnvuuWf53HPnwJ04wzAMAQAAAAAAAAAAmCA+1gUAAAAAAAAAAADHDhYmAAAAAAAAAACAaViYAAAAAAAAAAAApmFhAgAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGlYmAAAAAAAAAAAAKZhYQIAAAAAAAAAAJiGhQkAAAAAAAAAAGAaFiYAAAAAAAAAAIBpWJgAAAAAAAAAAACmYWECAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAaViYAAAAAAAAAAAApmFhAgAAAAAAAAAAmOaYXJi45JJLFBcX1+3Pjh07vO4799xzHccPGTLEY5q5c+c60uzdu1czZ85UamqqCgoKdN111+nIkSOO/QcPHtQvfvELjRw5UvHx8br22mu7lfPll1/W1KlTlZ2drbS0NE2aNEnPPfdcdCunF+st7SpJdXV1mj17tvr37y+LxaKRI0fqrbfeil7l9HL29rviiiu67Zs9e7bi4uJ0ySWXuKTtKW1tt2TJEsXFxen8888Pszb6rki3syStWrVK3/ve95STk6OUlBRNmDBBDzzwgDo7O13S1dTUaNasWcrMzFR2drZ+9atfqbGx0bG/tbVVl1xyiSZMmKDExESP7fjhhx96LFNFRUUEagcAAAAAAKDvSIx0hrVN7ZHO0qectOSQjjv33HP11FNPuWzr16+f130Wi8Xl99tvv12//vWvXbZlZGRIkjo7OzVz5kwVFRVp1apVOnjwoC666CIlJSXprrvukiS1tbWpX79+uummm/Tggw96LGNubq5uvPFGjR49WsnJyXrjjTd06aWXqqCgQOecc05I1x2O+rZ6U8+XZckK+pje0K7t7e0666yzVFBQoJdeekkDBw7Unj17lJ2dHfT1RkpLfatp57JmpYR0XHFxsZYsWaIHH3xQVqtV0tEPixctWqRBgwa5pO0pbW23e/du/fGPf9Qpp5wS/IVHUGd1tannS8jLC/qYSLbzsmXL9LOf/UyXXnqpVqxYoezsbL333nv605/+pNWrV+uFF15QXFycJGnWrFk6ePCgli9fro6ODl166aX6zW9+o0WLFkk62iesVquuvvpqLV261Oc1lJaWKjMz0/F7QUFB0PUAAAAAAADQl0V8YeK8e1ZEOkuf1twW2gf0FotFRUVFQe+zy8jI8Jrm3Xff1bZt2/Tee++psLBQkyZN0h133KHrr79ec+bMUXJysoYMGaKHHnpIkvTkk096zOf00093+f2aa67RM888o5UrV8ZkYeLCt39h6vleO//NoI/pDe365JNPqqamRqtWrVJSUpKko/+CP5aevegl0851+au/DOm4E044QTt37tTLL7+sWbNmSTr6v4oGDRqkoUOHuqTtKW0tHf1Ae9asWbrtttv0ySefqK6uLpjLjqiKiZNMPd/A/fuCPiZS7dzU1KRf//rX+sEPfqAFCxY4tv/P//yPCgsL9YMf/EAvvPCC/vu//1slJSV65513tG7dOk2dOlWSNG/ePH3ve9/TfffdpwEDBigtLU2PPvqoJOnTTz/12Y4FBQUxXWgEAAAAAADo6Y7JVzlF2+rVqzVhwgQVFhY6tp1zzjlqaGjQ1q1bQ8rTMAy9//77Ki0t1amnnhqpoiIIkWrX1157TdOnT9fs2bNVWFio8ePH66677ur2ahl0d9lll7n8C/knn3xSl156acTPE8kxfPvtt6ugoEC/+tWvIl3MPisS7fzuu++qurpaf/zjH7vt+/73v6+RI0dq8eLFko62d3Z2tmNRQpLOPPNMxcfHa+3atUGXf9KkSerfv7/OOussffrpp0EfDwAAAAAA0NcdswsTb7zxhtLT0x1/fvrTn3rdl56e7nh9i93111/fLc0nn3wiSaqoqHD5QFOS4/dg3zVeX1+v9PR0JScna+bMmZo3b57OOuusUC75mNAb2rW8vFwvvfSSOjs79dZbb+nmm2/W/fffr7/+9a+hXvYx45e//KVWrlypPXv2aM+ePfr000/1y192/x8YPaWtV65cqSeeeEILFy4M9lKPaZFo57KyMknSmDFjPJ5j9OjRjjQVFRXdXreUmJio3NzcoNq7f//+mj9/vpYuXaqlS5equLhYp59+ur744ouA8wAAAAAAADgWRPxVTr3Fd77zHcdrOSQpLS3N6z7p6Pc9OLvuuuscX8JqN3DgwIiXMyMjQxs2bFBjY6Pef/99/f73v9ewYcO6veYJR/WGdu3q6lJBQYEWLFighIQETZkyRfv379e9996rW2+9NaLn6mv69eunmTNn6umnn5ZhGJo5c6by8/O7pesJbW2z2XThhRdq4cKFHssI7yLZzoZhRLWszkaNGqVRo0Y5fp8xY4Z27typBx98UM8995xp5QAAAAAAAOjpIr4w8fafvhPpLKMiLS1NI0aMCHqfXX5+vtc0RUVF+uyzz1y2VVZWOvYFIz4+3nGeSZMmqaSkRHfffXdMFiaeO2+R6ecMVm9o1/79+yspKUkJCQmObWPGjFFFRYXa29uVnBzaF7qH46Jnf2L6OUN12WWX6corr5Qk/eMf//CYpie09c6dO7V79259//vfd2zr6uqSdPRf45eWlmr48OEB5RUpRZs2mHq+cITbziNHjpQklZSUaMaMGd32l5SUaOzYsZKOtmlVVZXL/iNHjqimpibomO3u29/+tlauXBlWHgAAAAAAAH1NxBcmctLM/1C1p5k+fbruvPNOVVVVOV4Psnz5cmVmZjo+CAtVV1eX2traIlHMoGVZsmJy3p4iUu160kknadGiRerq6lJ8/NG3qZWVlal///4xWZSQJGtWSkzOG4pzzz1X7e3tiouLi9qXwEeirUePHq3Nmze7bLvppptks9n00EMPqbi4OOLl9ichL8/0c4Yq3HY+++yzlZubq/vvv7/bwsRrr72m7du364477pB0tL3r6uq0fv16TZkyRZL0wQcfqKurS9OmTQvrOjZs2KD+/fuHlQcAAAAAAEBfc8y+ysmXtra2bu8VT0xMdHmViM1m65YmNTVVmZmZOvvsszV27FhdeOGFuueee1RRUaGbbrpJs2fPlsVicaTfsOHov15ubGzUoUOHtGHDBiUnJzs++Lz77rs1depUDR8+XG1tbXrrrbf03HPPdXt1CQLTU9r1t7/9rR555BFdc801uuqqq7R9+3bddddduvrqq6N16X1KQkKCSkpKHD970hPaOiUlRePHj3fJPzs7W5K6bUd34bZzWl
qaHnvsMV1wwQX6zW9+oyuvvFKZmZl6//33dd111+knP/mJfvazn0k6+j+Wzj33XP3617/W/Pnz1dHRoSuvvFIXXHCBBgwY4Mh727Ztam9vV01NjWw2m6P9J02aJEn6+9//rqFDh2rcuHFqbW3V448/rg8++EDvvvtuxOsHAAAAAACgVzOOQRdffLHxwx/+0Os+Sd3+jBo1ypFm8ODBHtNcfvnljjS7d+82zjvvPMNqtRr5+fnGH/7wB6Ojo8PlXJ7yGDx4sGP/jTfeaIwYMcJISUkxcnJyjOnTpxtLliyJbGX0Ib2lXQ3DMFatWmVMmzbNsFgsxrBhw4w777zTOHLkSOQqo4/x1baGYRg//OEPjYsvvtiRtie1dTDXcayLdDsbhmF8/PHHxjnnnGNkZmYaycnJxrhx44z77ruv23irrq42fv7znxvp6elGZmamcemllxo2m80ljbd+Y/e3v/3NGD58uJGSkmLk5uYap59+uvHBBx+EWSsAAAAAAAB9T5xhmPjNoAAAAAAAAAAA4JgWH+sCAAAAAAAAAACAYwcLEwAAAAAAAAAAwDQsTAAAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADANCxMAAAAAAAAAAMA0iaEe2NXVpQMHDigjI0NxcXGRLBMAAAAAAAAAAOhlDMOQzWbTgAEDFB/v/f9FhLwwceDAARUXF4d6OAAAAAAAAAAA6IP27dun4447zuv+kBcmMjIyHCfIzMwMNRsAAAAAAAAAANAHNDQ0qLi42LF+4E3ICxP21zdlZmayMAEAAAAAAAAAACTJ79c/8OXXAAAAAAAAAADANCxMAAAAAAAAAAAA07AwAQAAAAAAAAAATMPCBAAAAAAAAAAAMA0LEwAAAAAAAAAAwDQsTAAAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADANCxMAAAAAAAAAAMA0LEwAAAAAAAAAAADTsDABAAAAAAAAAABMw8IEAAAAAAAAAAAwDQsTAAAAAAAAAADANCxMAAAAAAAAAAAA07AwAQAAAAAAAAAATMPCBAAAAAAAAAAAMA0LEwAAAAAAAAAAwDQsTAAAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADBNwpw5c+aEcmBbW5vmzp2rP/zwfB359wtKHD5M8enpAR9/2Nam51ft1uD8NKVaEr1ua9+yVTuu+qNe6ipUZl6WXv58nwbnp6m5vdORtrm9U0+s3KjNtuUakj1ILUdatGz7yxqYcZzj57SkdL296y3H3/Z9/9q0TO990an15XUaUZjhOK83Na01WlTyvL6s+kJDsobImmj1mdZeDns6T9uC0VlZqcbHFihx+DAZTU2On73VffuWrar57WwljRunhIICv/l7aoNw2fPMSEl0tF+k8u4N5+8pAq0He7riqj1qvuZq1Q8brSVltqjUm3t/O7Buk57865MaVJiljIGFXtNF22Fbmxau2KG1Ow4HFBd6Ek9l91Z/9u2DuprUMe8htX70kZLGjFaNkeSS/rCtTS+9uV4F/1qoqk/W6CVbhgYPyPGbt70cuWnJ+tenu7rVZ6D3gc7KSjXc/4CjfJ7inT02xmVmau8zS/RijcVvGSPF/b6QXN3gKEvTc/9yidHOMTyQe2awMT/WnMvb0h6vTa9sU9bATHW0dGjLsyvVseRZffHCFh3Y0yprv3SVvLNdWQMzlWxNCij/pppmR572Yzxtc97eld+pN75+3XHfX7p+qepXNCu/ONfjeb3l58zTXMC9z7vHWU/9sLyuXPesm6thWcOVk5ITVF0Hyn7e5LYjemXBZyocnKOsrBSv152UluRol46WDpe68JTGvY68tdGnr6zRxx0rNChnUEDzH/f5krc5lXs71LTWeJ3b+ZuDedvv6zjnfc7zT0/zvpYjLY7yZluyHfNR5+tznqeGMk+MFnv5co/ka+uLpdr35QHlDs72Ow797Qu1HIHWT6Bt6q2dhmQN0b5DHbrpxY0aWZQhQ4rI/eSwrU1PrNyodbWvaXP1hm7PFIHWma+x5dynXt6+NKBnF2fu4+tg40HdsfIBrdmcps92HVBJ83saku19PO8/eEAvzX9VtVsbVDA0X8nWJK/3tJb2eK1fvFH7vjyguCJDL+5+wW95a1pr9PgXi/XMJ9s1qihPuame74v2urY/J0ZiXHkb955igCe+Yn+w9wWz58jhOmxr07+Wl6lp1T6lZiar5J3t6srv1Gvrn1DtP/+hjFHjZGnt1KGHH9e2r7qUPTSv270oUuUIdK4f7Tq259/Z2aW/vrJFI4sylJdh6TZnDHYO6czbZxie+pt9W15Kvt7c9KbWLdqkhm2NShqQ4JhPRfL+FMrnI4HMEdyv/dM9G7R2W6KGF2S7PB8E8xmBc1+w1FU7nk/qBw3X4g1VXud9rUaDFpU8r9UHVmnz4U1BxeJg6inc+21TTbM+eWmz3tqwXU0PPajmdZ/rleZMxzOVLzsOH9TtH87XpqptWl+a7KjnYDiPy7QjXT7nraHwVz+dlZXadd88Pb+iTEMGF6g10RLQc26sPmMKNzaVHWzQ3Cc/1PEvP624VSs99mNP56po2as7Vj6gjaUZGlWU7/hs1j6WfI0p93PaP3uw1+fA5ERtf6tUSWlJWvPGV3pnf52GFmUGPDaD/czP+ThPn1eEo6a1RktXzlfD/Q+oYsMqPdL+fxqWO1KGDK/zhnA+L7Y/A326RSGNv0AdtrXp/vc+0SOfvqPh/bKUHJfuEuucr+2lbS/q9YWv64YbbpDFYvGaZ9j/Y6Jjxw7ZHnhQnVVVQV/MEx/u1GFbm89tHWVlqirZqae32VRe1ejY75z2sK1N/163Ta/tekG1rTWqba3RktJFLj/vte1x+du+7+Wv3tWyzyq1ePUel/N6U9tao1d3LtOrO5eptrXGb1r7uXxtC0ZnVZWjvp1/9qajrEzta9aoo6wsoPw9tUG47Hk6t5+ZYn3+niLQerCna9haovY1a1RZWh61enPvb5Wl5VpccIIqS8t9pou2w7Y2LV69J+C40JN4Kru3+rNvr92zX00LFqppwUJ1VlV1S3/Y1qa339+orqef1IGX39CTn1cGlLe9HOVVjR7rM9D7QGdVlUv5PLHHw46yMn393L8DKmOkuN8XnMviHqMDiduerivQmB9rzmVsrm3R+iWb1VzboubaFpW++JkaX31LZYeyteW9vardV+/YHyjnPH1tc95eUV3hct9/Z/M72vbSDq/n9ZafM09zAfc+H0jf3mvbo63VW7TXtifgOgiWI/bvqpXW7lfFgQaP6ezX7dwu7nXhKY23fNzbaN3yL7V074sBz3/c50ve5lTu7eBrbudvDuZtv6/jPM05vc37nMvrPB91Tue+vaewl6+q8pA2vfqVNr/6VUDj0N++UMsRaj/ytt1bO9W21qi8qlFf7qlVeVVjxO4n9meXd/e97vGZItA68zW2nPtUoM8u7sc7H7fXtkclVfv09hd1em1jqeO5y5uqykPqWCltf3O34zq83dOaa1sc/aqiuiKg8ta21uitH
R/ri5Is7a6u9prO/TkxEryN+0D7p6/YH+x9wew5crgO29r06kflKlm2zXEvqaiu0OpNr2nE8x+p/uud6qyqUvVTi/XlG+Ue70WRKkegc/1o17E9/y1f1ztijdR9zhjOPNDbZxie+pt9W1ldqT7c9qG0KlHb39ztMp+KpFA+HwlkjuCc9tWdy/TWjo+1aOWBbs8HwXxG4NwXnJ9Pqr6u9Dnvs5fh//a8HXQs9nbNnoQ7VpprW7TmzTJ9+tlXGvnuUtW8/rbLM5Uvu2urVN7+oVbs+dSlnoPhMpf2M28Nhb/66ayq0oGX39DzOk5VX1cG/Jwbqxgcbmwqr2rU12V7lPz8M177sadz2ecDL6+pcfls1l4XvurE/Zz2zx7s9VlxoMHxrLHmzTI9u3ZvUGMz2PZwPi7Sn//UttZo9abXNPTlNdq/9n1ttZVpr22Pz3lDOJ8X25+BQh1/gTpsa9O720q1b/cwfXVoX7dY53xtS7e/GFCevMoJAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAacL+NgyjqSms420tHaptanf87Etz2xGPxztrbG/0eGxLR3NA6aKhsb1R9W31ET1vV119RPLxxrldIpFXtPLuDefvKQKtB2/jMBr15u1cjZ1yOZe/2IDAuLehv3q1p/eUzte+cMoVbn5G4zcxNpJlDLcs7rrq6tXp453YzulCPTYWPJW3rdF33GhrbFdLfWtA+fvKyz0f97Tu919v5/VX3mBEsm+Hq7X96Byqo6kjoOv2VQ8dzf6vxbl+nfNynhP54m2+5G1OZd8eyDzLWxn8HevpOE/HBDPv81buQOvJLIHOXz2Nq0iOKefyhNOP3PMJdn4e7pwo0HjgLz76GltmPOv4agf3Zy9nkX6OaWrt9NoeznUdqXHlbdzHUm95vnFuD4/3knqbujq/aaNoxI9QRauO/cUD+7wv2s//gYr0/SmcsRNKDPf2fBBI+wb67OQvbSh1GMw1BjO3dj/Ok0Dqxv2zulDGi3Od+Zu3hiKYeNLY3qXEIObusYjB0Xq28HQtvj4jcufpc9tgOd8fQhmbgbZHLJ/PfI3pcGNENPujc501t3muw2DjetgLEw1zblNGfOj/8eKqZz8POO09b5Z4Pd7+heU3r7rR47GPbvqny+/fpOsX8PlD5a1M4ai+4OcRz9NZMO3Sk/LuDefvKYKtBzPr7bpyq3TPCtPOd6wIp82HhplXoOcJV/0Nf5HyBkU835DL4kU4MTza8T/S3rzlfUlSpp/9kTqPN/Z7caZyInpeX2LdB509u3KXfiBp88OrtTmA9L7qZ+X8dcEfn3f0r3DnRN6Od93ue24XahkCPS6Y/AO7nt7DjHElRa5+Qs0nEmPb/uziS0D1GaGxFQpf58w8nKNTdK7HfZG+j9398kHdrYNe9/t7ToyEWI/ZnnS/8ec/XdbjvcTyP39UtSSlDpBkXkwJRKzquKfN+2Ld152FUhZv7RiJ9g00j2jXYVjjxtr9I8JArivRWqXc0cEd40uw89ZI+90nNdIngX8BcW+Kwf4Ecy2e0nr63DZYK+evc/TFUOq2N7SHrzgQeow4+gwU7etPSj/69xNvH5HU/VzBlp9XOQEAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADBN2N8xkTnnVhm33xHy8fMumqoRRRmSpB0VNp/vwvrTzDHd3lc276KpkqTfvfiWJOmOGXdK6v5Oq99O/F+X75mwp/vze38PueyBumPGnRqSdfQN7bvrd0XknYJ5SxZLit47J53bJVzu7RrJvHvD+XuKQOvB2ziMRr15O9e9w1o0/iff85sOwXFvQ3/1ak+/o8KmBx7Y43VfuG0TzH3An6y775Lumx/xMoZaFm/fM5G3ZLGSxo7xm0fHtpJucT7QY2PBU3ln3n6GJOmTPzzr8ZiZt5+hvCE5AeVfvbvW63tz3fNxT2u/79/72n0+z+vrHMGKZN8O10UnD1Vd2ZeacPV0TZ46sNt+9+u2t5unujj5ihP9fs+Ec/1W767VoodelOQ6J/LF23zJ25zKvn13/S6/cztvZfA3R/N0nKdjgpn3OZfbOV2g9WSWQOevnsZVJMeUXbj9yD2fYOfn4c6JdlTYHM8uvviLj77GVqSeOXzx1Q5bN23T5td3etwX6eeYG37UX6eOGO1xn3NdR2pceRv3Uuzev99bnm92VNg0Z/5qSZ7vJW2P36cB6QNVf9kfJPm+F5ktWnXsb35gn/d5mmPFQqTvT+HEqlBiuLfng0DaN9BnJ39pQ6nDYK4xmLm1s+rdtXrm7o+6bQ+kbj7ZtVmP7wjuGHfOdeZv3hqKYOYjD56Sq8ShwwKeu8ciBkfr2cLTtfj6jEhy/U4DT5/bBuvkK07UK8986bU87kL9zC+Wz2e+5g2hxgj7M1A0++OOCpt+//IuSdKvzkvUpH6TutXhN5+3/ymgPMNemIhLS5MRxvEZ1iTlpCU7fvYl1dK9uO7HpCenezzWmpQaULpoSE9OV5YlK6Lnjc/Oikg+3ji3SyTyilbeveH8PUWg9eBtHEaj3rydKz1BLufyFxsQGPc29Fev9vSe0vnaF065ws0vLv2bGBvJMoZbFnfx2VlKyMvzut+u00OsD/TYWPBUXku677hhSU+WNSsloPx95eWej3ta9/uvt/P6K28wItm3w5WSfHQOlZSWFNB1+6qHpFT/1+Jcv855Oc+JfPE2X/I2p7JvD2Se5a0M/o71dJynY4KZ93krd6D1ZJZA56+exlUkx5RzecLpR+75BDs/D3dOFGg88BcffY0tM551fLWD+7OXs0g/x6SlJHhtD+e6jtS48jbuY6m3PN84t4fHe0lWhuIzvmmjaMSPUEWrjv3FA/u8z9McKxYifX8KZ+yEEsO9PR8E0r6BPjv5SxtKHQZzjcHMrd2P8ySQunH/rC6U8eJcZ/7mraEIJp6kJ8crKYi5eyxicLSeLTxdi6/PiNx5+tw2WM73h1DGZqDtEcvnM19jOtwYEc3+6FxnqRbPdRhsXOdVTgAAAAAAAAAAwDQsTAAAAAAAAAAAANOwMAEAAAAAAAAAAEyTMGfOnDmhHNjW1qa5c+fqz3/6k1KPGyjL9OmK9/E+bU+syQmaMjTX5R1k3bZ1dqlj2zZln3qSThh7nHLTLY79zmkNGRo/sJ9OKJoka6JVKQlWTeg30fHzuLzxyrbkOP6275NhaEjWUE0elK//NyI/oPehGYY0OneMJheecDQPH5zL4WtbMOLS0o7Wd1raNz97q/vOLh3ZsUOpP/mxEgoKAsrfU7uEy5qcoMmDc1zaz0yxPn9PEWg9WJMT9K2BmUrcs0vW889X+oDCqNWbyzhua1f8yo/17bOmKWNgodd0ZjAMQxOKswOOCz2Jp7J7qz97n0hJilfylClKOe00xaend09vSCMK05Uyfqyyp07SlFFFfvO2l+Pbw/OUkpTgsT4Dug/8Jy/n8nkSl5Ymy7RpiktNVdbEcQGVMVLc7wv2siQU9OsWo/3GbU/XFWjM7wHsZYxLS1OSNVEDxhc63tGa3S9Z8Qnx
6j9jpI6bWqzUHKsGjC9UchDv97Tn6XyMp2327UVjC5SRnuG47xqGoeMLR2jQxOO8ntdbfs48zQWc+7ynOOveD7sMQ/ts+/Td4jOUkxL8lxQGypp8dPzts7Vq8klDlOXlXb32+rK3S5I1qVtduKfxVEce688wNHB4f32r/6SA5z/u8yVP8yePczIfczt/czBv+30d5z7n9DXvs5d3Ur/JrvPR/6TrNk/tQVISrBqXP04pCRYVjM5X8eQBAY1Df/tCKUcw9RNom3pqp8mFJ8iSkKLdh5r0vUkDlJdhidj9xJChEYXpGt9vnMdnioDrzMfYsvcpS0JKwM8ublm71MOu+nKNz5+o8QPzNWlQoeO5y5Muo0u76nZp+IQhGjplkOM6PN3T4tLSJBkqGJ2vgRP7KzElMaDytna0KCnR0Hnjxyg31ft90f05MRK8jftA+qev2B/KfWC0/i0AACAASURBVMHsOXK4DEljBueoeGKhUnOsKhpboM7EI+q0JmvQmecrNStPMiTr5PEaeEKxx3tRRMoRxFw/2nVsTU7QxOJsVdS3OmKN1H3OGM480NP90lN/s287deDpsiRaZBjS8ROHafCkYpf5VCSF8vlIIHMEO8OQhmQO0YTC0Zo2vNDl+SDYzwgcfSE54Zvnk1NOUVpOps9539EyDNX4/AlBx2Jv1+xJ+GPFkHVAphIS4pQ/9VvK//YJLs9U3nQZhrZXNGhi4UhN7j/GpZ6DOvt/xuUJQ3P9zltD4a9+OpuaZFWnvn3qt5SekxnQc24sP2MKJzZ1GYZ2VNh0wpAcZU7/ttd+7H6ulKR47aov14kDJumk4wc6Ppu1jyVfY6rbOf/zbG+vzxOH5Skzy6KisQVKSknUccfn68Tj+wU8NkP5zO+bz0M8f14RjtaOVkmGckZ/S9XD8vXdIWcrJyXH57whrM+LDUNj+o0IefwFqra5Ra1J23X+hEkakJnn0g+dyx93JE6vL3xdN9xwgywWi9f84gzDCOm7qxsaGpSVlaX6+nplZmaGfEEAAAAAAAAAAKD3C3TdgFc5AQAAAAAAAAAA07AwAQAAAAAAAAAATMPCBAAAAAAAAAAAMA0LEwAAAAAAAAAAwDQsTAAAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADANCxMAAAAAAAAAAMA0LEwAAAAAAAAAAADTsDABAAAAAAAAAABMw8IEAAAAAAAAAAAwDQsTAAAAAAAAAADANCxMAAAAAAAAAAAA07AwAQAAAAAAAAAATJMwZ86cOaEc2NbWprlz5+qGG26QxWKJcLFCU3awQTe9uFEjizKUl/FNmQ7b2vT8qt0anJ+mVEtit9/xjc7KSjU+tkBxmZlqeu5f3f5OHD5M8enpsS4mABO0b9mqmt/OVtK4cUooKOiV57PHNH+xK9B0kVTTWqPnP3te6xZt0sGvKrSic7k21mzUkKwhsiZaTSlDoGpaa7So5Hl9WfWFx/IdtrVp4YodWrvjsEYUZjjurZ2VlWq4/wG1fvSRbMOL9OqB/9PAjOO8Xl9Na42WbX/ZkcZbvr7E6p7fVNOsTa9sU9bATCVbk2Kejzt/bRhJznXe3N7Zrf7t15iUlqSSd7Z3u1Z/+0PVVNOs9Ys3at+XB5Q7OFsdLR0uvwdyDvc8nI9xn4d6SnuovFrv37tSuUOz1ZKY4LOePJ3b3jc6Wjq06ZVtOpKdohe++NrluGj2efcxGmmh5B9KnAjnuHC5j3FfY94eQ/d/uEov2TI0eECOSzk9tbVzH0vLSfV4znDt3VenuU99pg0VNo0cmNWt7rz1Qfv2jJREvfz5vpD7aCB9vLyuXI+8e6uGPPOetHKtksaMDuj+7q1f+Noe7XuMGeforKxU5dyH9PmiTTqwp1V5x/dTsjUprHtHeV257lk3V8OyhisnJcdlXyD36mjHm2DYy5KWlK6Xty8Nuj7K68p1/4r71PaedGhzjXIHZ6sxzubz+nYcrtBfli3X1n1tKshIDWvMRIK/fug8j5dhOOZ/7mOvqaZZXy5YoYb7H5C+XKukogJTPmfwNtd3395ZWalDDz+ujRva9dW2cq00PtKgnEE+29reP3KP5KvsjXKPsdZXHHaP2851XV3ZqCX/WK3CwTnKykrxeP5ozR0Dma8FO4/yFRfC4Ryjk1MP6eEN97mcI5LxJFJ5BVoXznE425LtiEHZlmy9veutiF3Tm2ueVu7jy2R7f402bZUOrilVynsvyTLq+IDGZmdlpfY9Pk9vJm7TcbnDekw9S9EbI9Hgraz2OKHCVD3/xX6t3XFYA5MTtW3plqDGoDNf93hf/dNfbLDHsJT4Ti145EG/6wZ96n9MlFc16ss9tSqvanTZftjWpic+3KnDtjaPv+MbnVVVsj3woDrKyjz+3VlVFesiAjBJR1mZ2tesUUdZWa89nz2m+YtdgaaLpNrWGn247UNpVaJKVpXprf1v6NWdy1TbWmNaGQJV21qjV3cu81q+w7Y2LV69R4tX73G5t3ZWValpwUI1LVio6spdWlK6yOf11bbWuKTxlq8vsbrnN9e2aP2SzWqubekR+bjz14aR5Fznnurffo21++o9Xqu//aFqrm3Rple/0uZXv1JzbUu330PJw5n7PNRT2tp99Tq4tUq1++r91pOnc9vrw/7z/oMN3Y6LZp93H6M9If9Q4kQ4x4XLfYz7GvP2GHrg5Tf05OeV3crpqa2d+5i3c4Zr/8EGraht0Utf7vdYd976oH17eVVjWH00kD6+17ZHFbs3K/GZF9S0YGHA93dv/cLX9mjfY8w4R2dVlWr//ZrKDmVry3t7v4lZYdw79tr2aGv1Fu217em2L5B7dbTjTTDsZdlr2xNSfey17dHu/Xu0952DjnuCv+vbXV2tDWWpWvZZZdhjJhL89UPnebzz/M997DXXtqj89S+Vteo1tT/7lGmfM3ib67tv76yqUvVTi7Xlvb3a8MlmLd37ot+2trdlVeUhr7HWVxx2j9vOdV1xoEFau//o315Ea+4YyHwt2HmUr7gQDucY/dXhXd3OEcl4Eqm8Aq0L5zjsHIP22vZE9JpWfPGCOp94TrX/fk1b3tur8te/VNujjwQ8NjurqnRgyVP699ev9ah6lqI3RqLBW1ntcWLP7lpHX6840BD0GHTJ08c93lf/9Bcb7DFs16GmgMrRpxYmAAAAAAAAAABAz8bCBAAAAAAAAAAAMM0x9QULtpYO1Ta1y9bSEeui9HhGY6PP3wGgN+mqq1dndbXP/TBHY3uj6ts813dje+TuNbG657c1tqulvjWs4/sKf3Xf0Rze/r4i2D7qqY/Y+3so+YXC1zgON99jhT1WBDPmndvZ/nso5wxXoGPTX3nd9weqJz7LhXotgebdVwVyr45WvAlGNGOTt+trOdLcbVs0+5k/0e6H/ubqkcg/kPN7SuevD7r3D0+xNpT5na2lQ81tRyRJHU0dXuN3tOeOfWk+Fol40tPmKtG+pkDHpvPY6an1HKl5UDT1pmdBf7Gh5T/xy59jamHiqmc/j3UReo36G/7i83cA6E2qL/h5rIuA/7h51Y2
mnCdW9/w3b3k/Juftify1wcr568La31cE21c99TGz+7tZ47gvCyVWhNvOkYpPh62J0ij/X1rqr7x96dmsL12LmQKpt74eb7xdX0dzP0mu89e+3M9iPVd3OX/qAJd9wfbBSMXaq579XHnNHfqBpM0Pr9bmiOQavL40H+uL8SRS11TsZXtQY7P46Jcn99R65jktsvzFhr+/WxpQPrzKCQAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGmOqe+YmHfRVI0oytCOCluffj9jJGTdfZfL90q4/w4AvUneksVKGjvG6/6ObSUxf7ftseKOGXdqSNZQj/t21++K2DtJY3XPn3n7Gcob4v/d695U767tM+8/nXfRVEne34l98hUn+nw3qb/9fYW/enI38/YzJLm+J9fe3yWZ0ud9jeNwRDIG9HT2WBHMmHduZyn4tg43Ptmt+3K/Xl+xw286f+V13x+onvgsF+q1BKInXm+kBHKvjla8CUY0Y5O36/t4R5nuLK1x2RbNfuZPtPuhv7l6uPzN9e3n79hWovrL/uCyz18fdO8fnmJtKPO7eRdNVfPX9frqgVWacPV0TZ460GO6aM8d+9J8LBLxpKfNVSJ1TQv2/s7jvkDHZse2Eu297rKIlinS9RypeVA09aZnQX+x4dqzR+nCv/nP55hamMiwJiknLVkZ1qRYF6XHi0tP9/k7APQm8dlZSsjL87q/MzvLxNIc29KT05Vl8Vzf6cmRu9fE6p5vSU+WNSslrOP7Cn91n5Qa3v6+Itg+6qmP2Pt7KPmFwtc4DjffY4U9VgQz5p3b2f57KOcMV6Bj01953fcHqic+y4V6LYHm3VcFcq+OVrwJRjRjk7frsyamSnJdmIhmP/Mn2v3Q31w9XP7m+vbze0rnrw+69w9PsTaU+V2GNUmyHP3ILiktyWv8jvbcsS/NxyIRT3raXCXa1xTo2HQeOz21niM1D4qm3vQs6C82WC2BLTnwKicAAAAAAAAAAGAaFiYAAAAAAAAAAIBpWJgAAAAAAAAAAACmYWECAAAAAAAAAACYJmHOnDlzQjmwra1Nc+fO1Q033CCLxRLhYoWmyzC0+1CTvjdpgPIyXMtkTU7QlKG5Sv3Pl2+4/45vxKWlyTJtmhIK+nX/e/p0xfNF2MCxobNLR3bsUOpPfqyEgoJee764tLSAYleg6SKp5UiLDEMaNmqwio4v0NiCcZpceIKsiVbTyhAow5BG547xWj7DMDShOFv/b0S+y73VMAwlT5kiyyknKzUzTxP6TfR5fSkJVpc03vL1JVb3/CRrogaML1RymF8QGal83Plrw0hyrnNP9Z9kTVTR2AKl5lg9Xqu//aEzVDA6X8WTByjJmuTye+Dn8HyM53moa9quri7Vfd2g478zVGk5qX7ryZ29byRZk5RkTVT/cYXKzEzpdlw0+7z7GO0J+YcSJ8I5LlzuY9zXmDcMQ4ljxih76iRNGVXUrZzube3ex7ydMxxGl6HDu2o0dUyhZowu8Fh33vqgNTlBkwfnKDfdElYf9dfHuwxDu+p3aUL+RGVMm6GU004L+P7u637mabsZ9xgzztHV1CTDkPrPGKnibw929JVQ7x1dhqF9tn36bvEZyknJ6bY/kHt1tONNMFISrBqXN16WhJSg6+NofyzXhPyJGjTuOMc9yNf1dalL5XW7NH34cZoxojDsMRMJPvuhyzy+n2P+52nsdbR2yDCkzO/MUOp3TjPtcwZvc/1u2w0pccxo5R3fT8WjBupb/Sf5beuUBKvG5Y9TRnq611jrLQ57itv2uk5JStD2miZNPmmIsnx8aW+05o7+52PBzaP8xYVw2GP0lKHZOtR6oNs5IhlPIpFXMHVhj8OT+k12xKBJ/SYr25ITuRhpSEOyhihl3AQlTZyovJH91G/iIKWeelLg909JmeMnacKAqT2mnu2iNUaiwVNZ7XFi+CmDZc1K0YTibJ04LE/W5PgQnmW+4e0e769/+osN1uQEjS1K1YJHHvS7bhBnGIYRdMklNTQ0KCsrS/X19crMzAwlCwAAAAAAAAAA0EcEum7Aq5wAAAAAAAAAAIBpWJgAAAAAAAAAAACmYWECAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAaViYAAAAAAAAAAAApmFhAgAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGlYmAAAAAAAAAAAAKZhYQIAAAAAAAAAAJiGhQkAAAAAAAAAAGAaFiYAAAAAAAAAAIBpWJgAAAA9VmdlpRruf0CdlZUuP/vTvmWrDv34p2rfstWEUsKbmtYaPbH5cT2x+XHVtNbEujh9UlNNsz5fvFGHyqv1+eKNaqppjllZDtva9NA7X+mhd77SYVtbzMrRW/iKaZ72BTKeDtvatHDFDo/172ufJ2UHG/TbJz9T2cGGAK8odOV15brhk+tVXlce9XMhcE01zVr95Oda9eTnQceWcI6Vjo6ButtuV91ttwd03wd6AuY96Cv89eWa1hotKnleNa01Lj/jG9GeFzvP64Kd40WT4/5999yA0rMwAQAAeqzOqirZHnhQnVVVLj/701FWpvY1a9RRVmZCKeFNbWuNXt25TK/uXKZaHlaiorm2ReuXbFbtvnqtX7JZzbUtMSvLYVubFq/eo8Wr9/SIB6OezldM87QvkPF02NamJz7c6XVhwts+T8qrGvXlnlqVVzUGeEWh22vbo63VW7TXtifq50LgmmtbtOnVr7T51a+Cji3hHCsdHQNNCxaqacHCgO77QE/AvAd9hb++XNtaoyWli1TbWuPyM74R7Xmx87wu2DleNNnv381PPxNQehYmAAAAAAAAAACAaViYAAAAAAAAAAAApmFhAgAAAAAAAAAAmCYx1gUAAADwp6uuPtZFAHq0juaOWBcBIeqqq1dndXW3beGwtXSotqm92zYAAIBIaWyP/vdQwbfePr9jYQIAAPR41Rf8PNZFAHq0lfPXxboICFE04ttVz34e8TwBAACc3bzqxlgX4ZjX2+d8vMoJAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAafiOCQAA0OPlLVksie+aALw5+YoT+Z6JXipvyWIljR3jsq1jW0lY8W7eRVM1oijDZduOCluvfw8xAADoOe6YcackvmsiluZdNFVS7/2uCRYmAABAjxefnRXrIgA9WlJqUqyLgBDFZ2cpIS/PZVtnmDEvw5qknLTkbtsAAAAiJT05PdZFOOb19vkdr3ICAAAAAAAAAACmYWECAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAaRLmzJkzJ5QD29raNHfuXN1www2yWCwRLhYAAMBRcWlpskyfrvi0tG9+TvfzRWudXTqyY4dSf/JjJRQUmFNQeGQY0ujcMZpceIKsidZYF6dPSrImqmhsgVJzrBowvlDJMfwSPMMwNKE4W/9vRL5SLYkxK0dv4SumedoXyHiyJidoytBcj/Xva5+7LsPQ7kNN+t6kAcrLiO7zXpdhaJ9tn75bfIZyUnKiei
4Ey1DB6HwVTx4QQmwJ59ij8SR5yhSlnHaa//s+0EMw70Ff4a8vpyRYNaHfRFkTrS4/4xvRnhc7z+uCmeNFm2EY6ho/Xn9fudLvukGcYRhGKCdpaGhQVlaW6uvrlZmZGXJhAQAAAAAAAABA7xfougGvcgIAAAAAAAAAAKZhYQIAAAAAAAAAAJiGhQkAAAAAAAAAAGAaFiYAAAAAAAAAAIBpWJgAAAAAAAAAAACmYWECAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAaViYAAAAAAAAAAAApmFhAgAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGlYmAAAAAAAAAAAAKZJjHUBAAAAAISms7JStvmPSZIyrrhcCYWFMS4RgL6C+AIA8KapplmbXtkmQ9K3zh+rtNzUWBcJvRALEwAAAEAv1VlVpaYFCyVJqT/6Lz44BBAxxBcAgDfNtS3a9OpXkqTjTxvKwgRCwqucAAAAAAAAAACAaViYAAAAAAAAAAAApmFhAgAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGlYmAAAAAAAAAAAAKZhYQIAAAAAAAAAAJiGhQkAAAAAAAAAAGAaFiYAAAAAAAAAAIBpWJgAAAAAAAAAAACmYWECAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAaViYAAAAAAAAAAAApkmMdQEAAAAAhCahoEBpv/m142cAiBTiCwDAm9Qcqyb+cLSM//wMhCLOMAwjlAMbGhqUlZWl+vp6ZWZmRrpcAAAAAAAAAACgFwl03YBXOQEAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADANCxMAAAAAAAAAAMA0LEwAAAAAAAAAAADTsDABAAAAAAAAAABMw8IEAAAAAAAAAAAwDQsTAAAAAAAAAADANCxMAAAAAAAAAAAA07AwAQAAAAAAAAAATMPCBAAAAAAAAAAAMA0LEwAAAAAAAAAAwDQsTAAAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADANCxMAAAAAAAAAAMA0LEwAAAAAAAAAAADTsDABAAAAAAAAAABMw8IEAAAAAAAAAAAwDQsTAAAAAAAAAADANImhHmgYhiSpoaEhYoUBAAAAAAAAAAC9k329wL5+4E3ICxM2m02SVFxcHGoWAAAAAAAAAACgj7HZbMrKyvK6P87wt3ThRVdXlw4cOKCMjAzFxcWFXEAAiKSGhgYVFxdr3759yszMjHVxAMCB+ASgJyI2AeipiE8AeiJik3+GYchms2nAgAGKj/f+TRIh/4+J+Ph4HXfccaEeDgBRlZmZyQ0CQI9EfALQExGbAPRUxCcAPRGxyTdf/1PCji+/BgAAAAAAAAAApmFhAgAAAAAAAAAAmCZhzpw5c2JdCACIpISEBJ1++ulKTAz5bXUAEBXEJwA9EbEJQE9FfALQExGbIiPkL78GAAAAAAAAAAAIFq9yAgAAAAAAAAAApmFhAgAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGlYmADQ491999068cQTlZGRoYKCAp1//vkqLS11SdPa2qrZs2crLy9P6enp+vGPf6zKykqXNHv37tXMmTOVmpqqgoICXXfddTpy5IiZlwKgD5s7d67i4uJ07bXXOrYRmwDEyv79+/XLX/5SeXl5slqtmjBhgj7//HPHfsMwdMstt6h///6yWq0688wztX37dpc8ampqNGvWLGVmZio7O1u/+tWv1NjYaPalAOhDOjs7dfPNN2vo0KGyWq0aPny47rjjDhmG4UhDfAIQbR9//LG+//3va8CAAYqLi9Mrr7zisj9ScWjTpk065ZRTlJKSouLiYt1zzz1Rv7behIUJAD3eRx99pNmzZ2vNmjVavny5Ojo6dPbZZ6upqcmR5ne/+51ef/11vfjii/roo4904MAB/ehHP3Ls7+zs1MyZM9Xe3q5Vq1bpmWee0dNPP61bbrklFpcEoI9Zt26dHnvsMU2cONFlO7EJQCzU1tbqpJNOUlJSkt5++21t27ZN999/v3Jychxp7rnnHj388MOaP3++1q5dq7S0NJ1zzjlqbW11pJk1a5a2bt2q5cuX64033tDHH3+s3/zmN7G4JAB9xN/+9jc9+uijeuSRR1RSUqK//e1vuueeezRv3jxHGuITgGhramrSt771Lf3jH//wuD8ScaihoUFnn322Bg8erPXr1+vee+/VnDlztGDBgqhfX69hAEAvU1VVZUgyPvroI8MwDKOurs5ISkoyXnzxRUeakpISQ5KxevVqwzAM46233jLi4+ONiooKR5pHH33UyMzMNNra2sy9AAB9is1mM44//nhj+fLlxmmnnWZcc801hmEQmwDEzvXXX2+cfPLJXvd3dXUZRUVFxr333uvYVldXZ1gsFmPx4sWGYRjGtm3bDEnGunXrHGnefvttIy4uzti/f3/0Cg+gT5s5c6Zx2WWXuWz70Y9+ZMyaNcswDOITAPNJMpYtW+b4PVJx6J///KeRk5Pj8lx3/fXXG6NGjYr2JfUa/I8JAL1OfX29JCk3N1eStH79enV0dOjMM890pBk9erQGDRqk1atXS5JWr16tCRMmqLCw0JHmnHPOUUNDg7Zu3Wpi6QH0NbNnz9bMmTNdYpBEbAIQO6+99pqmTp2qn/70pyooKNDkyZO1cOFCx/5du3apoqLCJT5lZWVp2rRpLvEpOztbU6dOdaQ588wzFR8fr7Vr15p3MQD6lBkzZuj9999XWVmZJGnjxo1auXKlzjvvPEnEJwCxF6k4tHr1ap166qlKTk52pDnnnHNUWlqq2tpak66mZ0uMdQEAIBhdXV269tprddJJJ2n8+PGSpIqKCiUnJys7O9slbWFhoSoqKhxpnD/4s++37wOAUCxZskRffPGF1q1b120fsQlArJSXl+vRRx/V73//e/3lL3/RunXrdPXVVys5OVkXX3yxI754ij/O8amgoMBlf2JionJzc4lPAEL25z//WQ0NDRo9erQSEhLU2dmpO++8U7NmzZIk4hOAmItUHKqoqNDQoUO75WHf5/yKzWMVCxMAepXZs2dry5YtWrlyZayLAuAYt2/fPl1zzTVavny5UlJSYl0cAHDo6urS1KlTddddd0mSJk+erC1btmj+/Pm6+OKLY1w6AMeyF154Qc8//7wWLVqkcePGacOGDbr22ms1YMAA4hMAHGN4lROAXuPKK6/UG2+8oRUrVui4445zbC8qKlJ7e7vq6upc0ldWVqqoqMiRprKystt++z4ACNb69etVVVWlE044QYmJiUpMTNRHH32khx9+WImJiSosLCQ2AYiJ/v37a+zYsS7bxowZo71790r6Jr54ij/O8amqqspl/5EjR1RTU0N8AhCy6667Tn/+8591wQUXaMKECbrwwgv1u9/9Tnfffbck4hOA2ItUHOJZzz8WJgD0eIZh6Morr9SyZcv0wQcfdPuvcFOmTFFSUpLef/99x7bS0lLt3btX06dPlyRNnz5dm
zdvdrlxLF++XJmZmd0e3AEgEGeccYY2b96sDRs2OP5MnTpVs2bNcvxMbAIQCyeddJJKS0tdtpWVlWnw4MGSpKFDh6qoqMglPjU0NGjt2rUu8amurk7r1693pPnggw/U1dWladOmmXAVAPqi5uZmxce7fhSVkJCgrq4uScQnALEXqTg0ffp0ffzxx+ro6HCkWb58uUaNGsVrnP4jYc6cOXNiXQgA8GX27Nl6/vnn9dJLL2nAgAFqbGxUY2OjEhISlJSUpJSUFB04cECPPPKIJk2apJqaGl1++eUqLi7WrbfeKkkaNmyYli5dqvfee08TJ07Uxo0bddVVV+mKK67QOeecE+MrBNAbWSwWFRQUuPxZtGiRhg0bposuuojYBCBmBg0apNtuu02JiYnq37+/3nnnHc2ZM0d33HGHJk6cqLi4OHV2duquu+7S2LFj1d7erquvvlrNzc2aN2+eEhMT1a9fP61du1aLFy/W5MmTtXv3bl1++eU6++yzdckll8T6EgH0UiUlJXrmmWc0atQoJScna8WKFfrLX/6iX/ziFzrrrLOITwBM0djYqG3btqmiokKPPfaYpk2bJqvVqvb2dmVnZ0ckDo0cOVKPPvqotm7dqpEjRzri3W233aYpU6bEtgJ6CgMAejhJHv889dRTjjQtLS3G//7v/xo5OTlGamqq8V//9V/GwYMHXfLZvXu3cd555xlWq9XIz883/vCHPxgdQbkO7AAABAxJREFUHR0mXw2Avuy0004zrrnmGsfvxCYAsfL6668b48ePNywWizF69GhjwYIFLvu7urqMm2++2SgsLDQsFotxxhlnGKWlpS5pqqurjZ///OdGenq6kZmZaVx66aWGzWYz8zIA9DENDQ3GNddcYwwaNMhISUkxhg0bZtx4441GW1ubIw3xCUC0rVixwuPnTBdffLFhGJGLQxs3bjROPvlkw2KxGAMHDjTmzp1r1iX2CnGGYRgxWhMBAAAAAAAAAADHGL5jAgAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGlYmAAAAAAAAAAAAKZhYQIAAAAAAAAAAJiGhQkAAAAAAAAAAGAaFiYAAAAAAAAAAIBpWJgAAAAA4Ncll1yi888/P9bFAAAAANAHJMa6AAAAAABiKy4uzuf+W2+9VQ899JAMwzCpRAAAAAD6MhYmAAAAgGPcwYMHHT//+9//1i233KLS0lLHtvT0dKWnp8eiaAAAAAD6IF7lBAAAABzjioqKHH+ysrIUFxfnsi09Pb3bq5xOP/10XXXVVbr22muVk5OjwsJCLVy4UE1NTbr00kuVkZGhESNG6O2333Y515YtW3TeeecpPT1dhYWFuvDCC3X48GGzLxkAAABADLEwAQAAACAkzzzzjPLz8/XZZ5/pqquu0m9/+1v99Kc/1YwZM/TFF1/o7LPP1oUXXqjm5mZJUl1dnb773e9q8uTJ+vzzz/XOO++osrJSP/vZz2J8JQAA/P/27h+lmSCO4/B3/QOWgoLEKk0kTZR4CLscwDJFKhvbQEgZsE/tDXKAVKmsrLQU9gKCVmnX2AnyvpXgKPg81e4Uy2/L4TMwAJQkTAAAAF9ydnaWyWSSTqeT8Xicvb29HB4eZjQapdPpZDqd5uXlJY+Pj0mS+Xyefr+f2WyWbrebfr+f29vbrFarPD09/fDfAAAApbhjAgAA+JLT09OP5+3t7RwcHKTX632sHR0dJUmen5+TJA8PD1mtVv+9r6Ku65ycnHzzxAAAwG8gTAAAAF+yu7v76b2qqk9rVVUlSd7e3pIk6/U6g8EgNzc3/3yr1Wp946QAAMBvIkwAAABFnJ+fZ7FYpN1uZ2fHVgQAAP4qd0wAAABFXF1d5fX1NZeXl7m/v09d11kulxkOh2ma5qfHAwAAChEmAACAIo6Pj3N3d5emaXJxcZFer5fr6+vs7+9na8vWBAAA/opqs9lsfnoIAAAAAADgb3AsCQAAAAAAKEaYAAAAAAAAihEmAAAAAACAYoQJAAAAAACgGGECAAAAAAAoRpgAAAAAAACKESYAAAAAAIBihAkAAAAAAKAYYQIAAAAAAChGmAAAAAAAAIoRJgAAAAAAgGKECQAAAAAAoJh3bv8p1u6sZCgAAAAASUVORK5CYII=", - "text/plain": [ - "" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " Upload widget is only available when the cell has been executed in the\n", + " current browser session. Please rerun this cell to enable.\n", + " \n", + " " ], - "source": [ - "# load groundtruth\n", - "from pyannote.database.util import load_rttm\n", - "_, groundtruth = load_rttm('ES2004a.rttm').popitem()\n", - "\n", - "# visualize groundtruth\n", - "groundtruth" + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "cell_type": "markdown", - "metadata": { - "id": "p_R9T9Y5Ynp9" - }, - "source": [ - "For the rest of this notebook, we will only listen to and visualize a one-minute long excerpt of the file (but will process the whole file anyway)." 
- ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Saving sample.rttm to sample.rttm\n" + ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 230 - }, - "id": "bAHza4Y1Ynp-", - "outputId": "c4cc2369-bfe4-4ac2-bb71-37602e7c7a8a", - "vscode": { - "languageId": "python" - } - }, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nO3de3RV5Zk/8CeQCzEhCRcDwXJTRLygoHQs6kztVAHrEmtntFJEpY5WB7V2quO4vNE6WmlrqaOzdGS0Y50q01bbsVbrXSvC4JVaKwuRCrZKoCYSAkJAsn9/8OPUlEsSck5Odvh81spaZO/3vPvd593nOfucL9m7IEmSJAAAAAAAAFKsR74HAAAAAAAA0FECDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYHHDpx99tlRUFCw3c9bb72103WTJk3KPH7YsGE7bHPjjTdm2rzzzjtx4oknxl577RXV1dVx2WWXxUcffZRZv3LlyvjSl74UI0eOjB49esQll1yy3TgfeOCBGDduXFRVVUVZWVmMGTMm7rnnntw+OSmXlrmNiFizZk3MmDEjampqoqSkJEaOHBkPP/xw7p6cFNs2d+eff/5262bMmBEFBQVx9tlnt2jbVeZ5m7lz50ZBQUF8/vOf7+Cz0b1le64jIubPnx+f+9znok+fPtGrV68YPXp0fO9734stW7a0aFdfXx9Tp06NioqKqKqqinPOOSfWrVuXWb9x48Y4++yzY/To0VFYWLjDuXzmmWd2OKba2tosPDsAAAAAe7bCfGz0g/WbOnV7fcqK2/2YSZMmxQ9+8IMWy/bee++drispKWnx+ze/+c0499xzWyzr3bt3RERs2bIlTjzxxBg4cGDMnz8/Vq5cGWeeeWYUFRXFDTfcEBERTU1Nsffee8dVV10Vs2fP3uEY+/btG1deeWWMGjUqiouL46GHHorp06dHdXV1TJw4sd373FENTQ2dur3Kksrdelwa5nbTpk1x/PHHR3V1dfz0pz+NffbZJ1asWBFVVVW7tc8dtaFhY6dur7SyV7sfM3jw4Jg7d27Mnj07SktLI2LrF9D33ntvDBkypEXbrjLP2yxfvjwuvfTS+Ou//ut273c2bamr69Tt9ezXb7cel825/tnPfhannXZaTJ8+PZ5++umoqqqKJ554Iv75n/85FixYED/+8Y+joKAgIiKmTp0aK1eujMcffzw2b94c06dPj/POOy/uvffeiNh6XJSWlsbFF18c999//y73YcmSJVFRUZH5vbq6ereeCwAAAAD+LC+BxwnffrpTt/d/32j/l/8lJSUxcODAdq/bpnfv3jtt89hjj8Ubb7wRTzzxRAwYMCDGjBkT1113XVx++eUxc+bMKC4ujmHDhsXNN98cERF33XXXDvs59thjW/z+1a9+Ne6+++6YN29eXgKPaY98qVO39+Dnf7lbj0vD3N51111RX18f8+fPj6KioojY+lcH+fLDM3/aqdv7yv+e0e7HHH744bFs2bJ44IEHYurUqRGx9a+ghgwZEsOHD2/RtqvMc8TWL8mnTp0a3/jGN+K5556LNWvWtGe3s6r20DGdur193v3Dbj0uW3O9fv36OPfcc2Py5Mlxxx13ZJb/wz/8QwwYMCAmT54cP/7xj+OLX/xiLF68OH71q1/Fiy++GOPGjYuIiFtuuSU+97nPxXe/+90YNGhQlJWVxW233RYREc8///wu57K6ujpvASYAAABAd+WSVnmwYMGCGD16dAwYMCCzbOLEibF27dr43e9+t1t9JkkSTz75ZCxZsiT+5m/+JltDpZ2yNbcPPvhgjB8/PmbMmBEDBgyIQw45JG644YbtLrFDS1/+8pdb/G/+u+66K6ZPn5717WTzNfzNb34zqqur45xzzsn2MLu1bMz1Y489FnV1dXHppZdut+6kk06KkSNHxn333RcRW+e8qqoqE3ZERBx33HHRo0ePWLhwYbvHP2bMmKipqYnjjz8+nn/++XY/HgAAAIDtCTx24qGHHory8vLMz6mnnrrTdeXl5ZnL2Gxz+eWXb9fmueeei4iI2traFl+URkTm9/Zex72hoSHKy8ujuLg4TjzxxLjlllvi+OOP351d3mOkYW5///vfx09/+tPYsmVLPPzww3H11VfHTTfdFP/6r/+6u7u9RzjjjDNi3rx5sWLFilixYkU8//zzccYZ2/+1SFeZ53nz5sWdd94Zc+bMae+u7vGyMddvvvlmREQceOCBO9zGqFGjMm1qa2u3u+xUYWFh9O3bt11zXlNTE7fffnvcf//9cf/998fgwYPj2GOPjVdeeaXNfQAAAACwY3m5pFUafOYzn8lcmiQioqysbKfrIrbeT+PjLrvsssyNc7fZZ599sj7O3r17x6JFi2LdunXx5JNPxj/90z/Fvvvuu93lrvizNMxtc3NzVFdXxx133BE9e/aMI444It599934zne+E9dee21Wt9Wd7L333nHiiSfGf/3Xf0WSJHHiiSdG//79t2vXFea5sbExpk2bFnPmzNnhGNm1bM51kiQ5HevHHXDAAXHAAQdkfj/qqKNi2bJlMXv27Ljnnns6bRwAAAAA3VFeAo9H/vkz+dhsu5SVlcWIESPavW6b/v3777TNwIED44UXXmixbNWqVZl17dGjR4/MdsaMGROLFy+Ob33rW3kJPO454d5O3+buSMPc1tTURFFRUfTs2TOz7MADD4za2trYtGlTFBcXt7mvbDjzh3/fqdvriC9/+ctx4YUXRkTEv//7v++wTVeY52XLlsXy5cvjpJNOyixrbm6OiK1/ObBkyZLYb7/92tRXtgx8bVGnbq+jOjrXI0eOjIiIxYsXx1FHHbXd+sWLF8dBBx0UEVvndfXq1S3Wf/TRR1FfX9/uuv2X/uqv/irmzZvXoT4AAAAAyFPg0aesc7+s7WrGjx8f119/faxevTpziZTHH388KioqMl+u7a7m5uZoamrKx
jDbrbKkMi/b7UqyNbdHH3103HvvvdHc3Bw9emy98tybb74ZNTU1nR52RESUVvbq9G3urkmTJsWmTZuioKAgJk6cmJNtZGOeR40aFb/97W9bLLvqqquisbExbr755hg8eHDWx92anv36dfo2O6Kjcz1hwoTo27dv3HTTTdsFHg8++GAsXbo0rrvuuojYOudr1qyJl19+OY444oiIiHjqqaeiubk5jjzyyA7tx6JFi6KmpqZDfQAAAADgkla7pampabtrthcWFra4nEpjY+N2bfbaa6+oqKiICRMmxEEHHRTTpk2Lb3/721FbWxtXXXVVzJgxI0pKSjLtFy3a+r+t161bF3/6059i0aJFUVxcnPlC9Vvf+laMGzcu9ttvv2hqaoqHH3447rnnnu0u30LbdZW5veCCC+LWW2+Nr371q3HRRRfF0qVL44YbboiLL744V7vebfTs2TMWL16c+feOdIV57tWrVxxyyCEt+q+qqoqI2G45O9bRuS4rK4v/+I//iNNPPz3OO++8uPDCC6OioiKefPLJuOyyy+Lv//7v47TTTouIrX9hNWnSpDj33HPj9ttvj82bN8eFF14Yp59+egwaNCjT9xtvvBGbNm2K+vr6aGxszBwDY8aMiYiI73//+zF8+PA4+OCDY+PGjfGf//mf8dRTT8Vjjz2W9ecHAAAAYI+TsJ2zzjorOfnkk3e6LiK2+znggAMybYYOHbrDNl/5ylcybZYvX56ccMIJSWlpadK/f//k61//erJ58+YW29pRH0OHDs2sv/LKK5MRI0YkvXr1Svr06ZOMHz8+mTt3bnafjG4mLXObJEkyf/785Mgjj0xKSkqSfffdN7n++uuTjz76KHtPRjeyq3lNkiQ5+eSTk7POOivTtivNc3v2g+zPdZIkya9//etk4sSJSUVFRVJcXJwcfPDByXe/+93tXm91dXXJlClTkvLy8qSioiKZPn160tjY2KLNzo6dbWbNmpXst99+Sa9evZK+ffsmxx57bPLUU0918FkBAAAAIEmSpCBJOvFurQAAAAAAADnQI98DAAAAAAAA6CiBBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKlXmKuOm5ub47333ovevXtHQUFBrjYDAAAAAACkQJIk0djYGIMGDYoePbL/9xg5Czzee++9GDx4cK66BwAAAAAAUugPf/hDfOITn8h6vzkLPHr37h0RWwdeUVGRq80AAAAAAAApsHbt2hg8eHAmP8i2nAUe2y5jVVFRIfAAAAAAAAAiInJ2Gww3LQcAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6OQ881td/mOtNQLf0x5Xvxnd/clP8ceW7+R4K3Uz9xvq4d/GPon5jfURsrdMv3febrNbrXPSZZltWrYq1N30vtqxale+hZE132KdsHadpOt7/8vXf0XaQbbk49j7e5/uNTTHn6bfi/camrPXPnq2zjil1GSC3ukud7S77EdG99oXOlfPAY8OajbneBHRLtXW18euip6K2rjbfQ6Gb+WBjfcxdcm988P9PGj78YEO8PPe38eEHG7K2jVz0mWZbVq+Oxu/Nji2rV+d7KFnTHfYpW8dpmo73v3z9d7QdZFsujr2P9/l+Y1Pc+cwygQdZ01nHlLoMkFvdpc52l/2I6F77QudySSsAAAAAACD1BB4AAAAAAEDqFeZ6A03rN8WGBpe1gvba9OHmiIhY/9H6aGhqyPNo6E7WbVq3w+VN67JXr5vWbcpKP91N85qG2FJXl+9hZEXzmu5Tlzp67KfxeF+3ad0u31t2Viegs7R2jLa3r7/UuGFzfLA+fa9dup7GDZs7dXvZfG0A8Gfd7fy3O7xfdLc5ofPkPPB49Ppno7SoNNebgW6noV99xOSIm5bPilie79GwJ/jlNU/mewjdXt3pU/I9BHZgTzz2r55/Zb6HALuU62P0oh++lNP+IVfUbwDawvsFezKXtAIAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEi9nN/DY+KVn45hhwzJ9Wag23l1yaKY996j8fVhl8eYAw/L93DoRpY3vL3D63me+M3PRr9hfbKyjbrlH+yR90VoTb+590XRQQfmexhZsfmNxd3mniQdPfbTeLxfd9T1Maxy+E7X76xOQGdp7Rhtjx0dz7ecOS5GDOydlf7Zs71V29ip94TJ5msDgD/rbue/3eH9orvNCZ0n54FHSVlxlFb2yvVmoNsp3qsoIiLKCsuisqQyz6OhOykvLt/h8pLy7NXrkvLirPTT3fSoqoye/frlexhZsaWq+9Sljh77aTzey4vLd/nesrM6AZ2ltWO0vX39pd6lRdGnLH2vXbqe3qVFnbq9bL42APiz7nb+2x3eL7rbnNB5XNIKAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6vWcOXPmzFx03NTUFDfeeGP8y7/8S/SucpMZaK/mLUk0Ll8fnxr1qajoXZHv4dDN9OpZGqP3PjRKC0sjIqKotDAGHTIgirN4481c9JlmBWVlUTJ+fPQo7z7vid1hn7J1nKbpeP/L139H20G25eLY+3ifpcU944jhfWOvksKs9c+erbOOKXUZILe6S53tLvsR0b32hT/blhtcccUVUVJSkvX+C5IkSbLea0SsXbs2Kisro6GhISoqfFkLAAAAAAB7slznBi5pBQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8uoH6jfVx7+IfRf3G+i7RT67taJwdHfv7jU0x5+m34v3Gpqy0y5Zcba+z94PcM6fp1hXmryuMAei61AjypS3n+bvzWWDLqlWx9qbvxZZVq3ZrXB19PLBVe95fvBd1nnw91+Y4+zyndEV1OT4eBR7dwAcb62Puknvjgw4GFdnqJ9d2NM6Ojv39xqa485llbQo82tIuW3K1vc7eD3LPnKZbV5i/rjAGoOtSI8iXtpzn785ngS2rV0fj92bHltWrd2tcHX08sFV73l+8F3WefD3X5jj7PKd0RXXrBB4A
AAAAAAC7JPAAAAAAAABST+ABAAAAAACkXmG+B0D2rNu0LhqaGjr0+DT5+P5ma+yNGzbHB+s37XJ9PrQ2rt3pj+4p28cKnaMrvSYdQ8COdKU6xZ5pV591OvJZoHlNQ2ypq9utxwHZ05ZzUO9Fna+zPxuY49zxOY+upHHDRzntX+DRjVw9/8p8D6FT5WJ/L/rhS1nvMxu66rjoehwrdJRjCICuKFefdepOn5KTfoH2cQ7aNZmX7sNc0pV81LQ+p/27pBUAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDquYdHN3LdUdfHsMrhu/345Q1vp+o+IB/f32yN/ZYzx8WIgb13uv6t2sa8XPewtXG1V772g9zL9rFC5+hKr0nHELAjXalOsWfa1WedjnwW6Df3vig66MB2P27zG4vd/wOyqC3noN6LOl9nfzYwx7njcx5dyaKl78Xfzspd/wKPbqS8uDwqSyo79Pg0+fj+ZmvsvUuLok9Z8S7X50Nr49qd/uiesn2s0Dm60mvSMQTsSFeqU+yZdvVZpyOfBXpUVUbPfv3a/bgtVbv/uQvYXlvOQb0Xdb7O/mxgjnPH5zy6kt6luY0kXNIKAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9XrOnDlzZi46bmpqihtvvDGuuOKKKCkpycUm+JhePUtj9N6HRmlhaZfoJ9d2NM6Ojr20uGccMbxv7FWy6+vItbVdtuRqe529H+SeOU23rjB/XWEMQNelRpAvbTnP353PAgVlZVEyfnz0KN+9e4B09PHAVu15f/Fe1Hny9Vyb4+zznNLVNDU1xc3f+07OcoOCJEmSrPcaEWvXro3KyspoaGiIioqKXGwCAAAAAABIiVznBi5pBQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN47CHeb2yKOU+/Fe83NnWoDQDs6eo31se9i38U9Rvrd7kMAAAA6FwCjz3E+41Nceczy1oNPFprAwB7ug821sfcJffGBx8LN3a0DAAAAOhcAg8AAAAAACD1BB4AAAAAAEDqFeZ7AHSuxg2b44P1m3a6DgBom3Wb1kVDU0Pm3wAAAEB+CTz2MBf98KV8DwEAuoWr51+Z7yEAAAAAH+OSVgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKnnHh57mFvOHBcjBvbe4bq3ahvd4wMA2ui6o66PYZXDIyJiecPb7ukBAAAAeSbw2MP0Li2KPmXFO10HALRNeXF5VJZUZv4NAAAA5JdLWgEAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gcceon/vkjjn2P2if++SDrUBgD1dn1594/QDvhR9evXd5TIAAACgcxUkSZLkouO1a9dGZWVlNDQ0REVFRS42AQAAAAAApESucwN/4QEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1CnPVcZIkERGxdu3aXG0CAAAAAABIiW15wbb8INtyFnjU1dVFRMTgwYNztQkAAAAAACBl6urqorKyMuv95izw6Nu3b0REvPPOOzkZOJAOa9eujcGDB8cf/vCHqKioyPdwgDxQBwB1AIhQCwB1AIhoaGiIIUOGZPKDbMtZ4NGjx9bbg1RWVipgQFRUVKgFsIdTBwB1AIhQCwB1APhzfpD1fnPSKwAAAAAAQCcSeAAAAAAAAKnXc+bMmTNz1nnPnnHsscdGYWHOrpwFpIBaAKgDgDoARKgFgDoA5LYOFCRJkmS9VwAAAAAAgE7kklYAAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABSr92Bx7vvvhtnnHFG9OvXL0pLS2P06NHx0ksvZdYnSRLXXHNN1NTURGlpaRx33HGxdOnSFn3U19fH1KlTo6KiIqqqquKcc86JdevWdXxvgE7RWh144IEHYsKECdGvX78oKCiIRYsWbdfHxo0bY8aMGdGvX78oLy+Pv/u7v4tVq1Z15m4AHbSrWrB58+a4/PLLY/To0VFWVhaDBg2KM888M957770WfTgngHRr7Zxg5syZMWrUqCgrK4s+ffrEcccdFwsXLmzRhzoA6dZaHfi4888/PwoKCuL73/9+i+XqAKRfa7Xg7LPPjoKCghY/kyZNatGHWgDp1pZzgsWLF8fkyZOjsrIyysrK4pOf/GS88847mfXZ+L6wXYHHBx98EEcffXQUFRXFI488Em+88UbcdNNN0adPn0ybb3/72/Fv//Zvcfvtt8fChQujrKwsJk6cGBs3bsy0mTp1avzud7+Lxx9/PB566KH49a9/Heedd167Bg7kR1vqwPr16+OYY46JWbNm7bSfr33ta/GLX/wifvKTn8Szzz4b7733XnzhC1/ojF0AsqC1WvDhhx/GK6+8EldffXW88sor8cADD8SSJUti8uTJLfpxTgDp1ZZzgpEjR8att94av/3tb2PevHkxbNiwmDBhQvzpT3/KtFEHIL3aUge2+dnPfhb/93//F4MGDdpunToA6dbWWjBp0qRYuXJl5ue+++5rsV4tgPRqSx1YtmxZHHPMMTFq1Kh45pln4rXXXourr746evXqlWmTle8Lk3a4/PLLk2OOOWan65ubm5OBAwcm3/nOdzLL1qxZk5SUlCT33XdfkiRJ8sYbbyQRkbz44ouZNo888khSUFCQvPvuu+0ZDpAHrdWBj3v77beTiEheffXVFsvXrFmTFBUVJT/5yU8yyxYvXpxERLJgwYKsjhfIjfbUgm1eeOGFJCKSFStWJEninADSbnfqQENDQxIRyRNPPJEkiToAadfWOvDHP/4x2WeffZLXX389GTp0aDJ79uzMOnUA0q8tteCss85KTj755J2uVwsg3dpSB774xS8mZ5xxxk7XZ+v7wnb9hceDDz4Y48aNi1NPPTWqq6tj7NixMWfOnMz6t99+O2pra+O4447LLKusrIwjjzwyFixYEBERCxYsiKqqqhg3blymzXHHHRc9evTY7s/bga6ntTrQFi+//HJs3ry5Ra0YNWpUDBkyJFMrgK5td2pBQ0N
DFBQURFVVVUQ4J4C0a28d2LRpU9xxxx1RWVkZhx12WESoA5B2bakDzc3NMW3atLjsssvi4IMP3q4PdQDSr63nBM8880xUV1fHAQccEBdccEHU1dVl1qkFkG6t1YHm5ub45S9/GSNHjoyJEydGdXV1HHnkkfHzn/880yZb3xe2K/D4/e9/H7fddlvsv//+8eijj8YFF1wQF198cdx9990REVFbWxsREQMGDGjxuAEDBmTW1dbWRnV1dYv1hYWF0bdv30wboOtqrQ60RW1tbRQXF2e+9Nzm47UC6NraWws2btwYl19+eUyZMiUqKioiwjkBpF1b68BDDz0U5eXl0atXr5g9e3Y8/vjj0b9//4hQByDt2lIHZs2aFYWFhXHxxRfvsA91ANKvLbVg0qRJ8cMf/jCefPLJmDVrVjz77LNxwgknxJYtWyJCLYC0a60OrF69OtatWxc33nhjTJo0KR577LE45ZRT4gtf+EI8++yzEZG97wsL2zPw5ubmGDduXNxwww0RETF27Nh4/fXX4/bbb4+zzjqrPV0BKaUOABHtqwWbN2+O0047LZIkidtuuy0fwwVyoK114DOf+UwsWrQo3n///ZgzZ06cdtppsXDhwu2+1ADSp7U68PLLL8fNN98cr7zyShQUFOR5tECutOWc4PTTT8+0Hz16dBx66KGx3377xTPPPBOf/exn8zJuIHtaqwPNzc0REXHyySfH1772tYiIGDNmTMyfPz9uv/32+PSnP521sbTrLzxqamrioIMOarHswAMPzNxJfeDAgRER2905fdWqVZl1AwcOjNWrV7dY/9FHH0V9fX2mDdB1tVYH2mLgwIGxadOmWLNmTYvlH68VQNfW1lqwLexYsWJFPP7445vo3U4AAAbjSURBVJm/7ohwTgBp19Y6UFZWFiNGjIhPfepTceedd0ZhYWHceeedEaEOQNq1Vgeee+65WL16dQwZMiQKCwujsLAwVqxYEV//+tdj2LBhEaEOQHewO98T7LvvvtG/f/946623IkItgLRrrQ70798/CgsLW80WsvF9YbsCj6OPPjqWLFnSYtmbb74ZQ4cOjYiI4cOHx8CBA+PJJ5/MrF+7dm0sXLgwxo8fHxER48ePjzVr1sTLL7+cafPUU09Fc3NzHHnkke0ZDpAHrdWBtjjiiCOiqKioRa1YsmRJvPPOO5laAXRtbakF28KOpUuXxhNPPBH9+vVr0d45AaTb7p4TNDc3R1NTU0SoA5B2rdWBadOmxWuvvRaLFi3K/AwaNCguu+yyePTRRyNCHYDuYHfOCf74xz9GXV1d1NTURIRaAGnXWh0oLi6OT37yk7tsk7XvC9t8e/MkSV544YWksLAwuf7665OlS5cmP/rRj5K99tor+e///u9MmxtvvDGpqqpK/vd//zd57bXXkpNPPjkZPnx4smHDhkybSZMmJWPHjk0WLlyYzJs3L9l///2TKVOmtGcoQJ60pQ7U1dUlr776avLLX/4yiYhk7ty5yauvvpqsXLky0+b8889PhgwZkjz11FPJSy+9lIwfPz4ZP358PnYJ2A2t1YJNmzYlkydPTj7xiU8kixYtSlauXJn5aWpqyvTjnADSq7U6sG7duuSKK65IFixYkCxfvjx56aWXkunTpyclJSXJ66+/nulHHYD0astng780dOjQZPbs2S2WqQOQbq3VgsbGxuTSSy9NFixYkLz99tvJE088kRx++OHJ/vvvn2zcuDHTj1oA6dWWc4IHHnggKSoqSu64445k6dKlyS233JL07Nkzee655zJtsvF9YbsCjyRJkl/84hfJIYcckpSUlCSjRo1K7rjjjhbrm5ubk6uvvjoZMGBAUlJSknz2s59NlixZ0qJNXV1dMmXKlKS8vDypqKhIpk+fnjQ2NrZ3KECetFYHfvCDHyQRsd3Ptddem2mzYcOG5B//8R+TPn36JHvttVdyyimntAhEgK5vV7Xg7bff3mEdiIjk6aefzrRzTgDptqs6sGHDhuSUU05JBg0alBQXFyc1NTXJ5MmTkxdeeKFFH+oApFtrnw3+0o4CD3UA0m9XteDDDz9MJkyYkOy9995JUVFRMnTo0OTcc89NamtrW/ShFkC6teWc4M4770xGjBiR9OrVKznssMOSn//85y3WZ+P7woIkSZJ2/HUKAAAAAABAl9Oue3gAAAAAAAB0RQIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAA6LCzzz47Pv/5z+d7GAAAwB6sMN8DAAAAuraCgoJdrr/22mvj5ptvjiRJOmlEAAAA2xN4AAAAu7Ry5crMv//nf/4nrrnmmliyZElmWXl5eZSXl+djaAAAABkuaQUAAOzSwIEDMz+VlZVRUFDQYll5efl2l7Q69thj46KLLopLLrkk+vTpEwMGDIg5c+bE+vXrY/r06dG7d+8YMWJEPPLIIy229frrr8cJJ5wQ5eXlMWDAgJg2bVq8//77nb3LAABACgk8AACAnLj77rujf//+8cILL8RFF10UF1xwQZx66qlx1FFHxSuvvBITJkyIadOmxYcffhgREWvWrIm//du/jbFjx8ZLL70Uv/rVr2LVqlVx2mmn5XlPAACANBB4AAAAOXHYYYfFVVddFfvvv39cccUV0atXr+jfv3+ce+65sf/++8c111wTdXV18dprr0VExK233hpjx46NG264IUaNGhVjx46Nu+66K55++ul4880387w3AABAV+ceHgAAQE4ceuihmX/37Nkz+vXrF6NHj84sGzBgQERErF69OiIifvOb38TTTz+9w/uBLFu2LEaOHJnjEQMAAGkm8AAAAHKiqKioxe8FBQUtlhUUFERERHNzc0RErFu3Lk466aSYNWvWdn3V1NTkcKQAAEB3IPAAAAC6hMMPPzzuv//+GDZsWBQW+qgCAAC0j3t4AAAAXcKMGTOivr4+pkyZEi+++GIsW7YsHn300Zg+fXps2bIl38MDAAC6OIEHAADQJQwaNCief/752LJlS0yYMCFGjx4dl1xySVRVVUWPHj66AAAAu1aQJEmS70EAAAAAAAB0hP8mBQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1/h//ffiQt3ubxgAAAABJRU5ErkJggg==", - "text/plain": [ - "" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from pyannote.core import Segment, notebook\n", - "# make notebook visualization zoom on 600s < t < 660s time range\n", - "EXCERPT = Segment(600, 
660)\n", - "notebook.crop = EXCERPT\n", - "\n", - "# visualize excerpt groundtruth\n", - "groundtruth" + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABHQAAACsCAYAAAAaLvvnAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAOHUlEQVR4nO3de6ykZ10H8O+v3YIGCghbG1yqC+WiBWwpa9OKJk2DbQUVURRISCDyhxowXNQEFOzWqEnBtl4AjQVCDYSLgFpBqA1ZBJWCp1As5aJtbFPWUkStbVHLpT//mJdwaLuX2Z1zZp6zn08yOe95b/ObeeeZ951vnmemujsAAAAAjOOoZRcAAAAAwHwEOgAAAACDEegAAAAADEagAwAAADAYgQ4AAADAYAQ6AAAAAIMR6AAAAAAMRqADAAAAMBiBDgAAAMBgBDoAAAAAgxHoTKrqeVX1msPY/uSq+khVXVNVf1VVD1i37OVVdV1Vfa6qzllMxVvbRh2PqnpIVe2pqjsOZ/8AAACwTAKdBaiqo5O8PsnLuvvxSf48ya9Oy05K8qwkj01ybpLXTeuzQfZ3PJL8X5JXJvmVJZUHAAAAh22oQKeq7ldV762qT1bVp6rqmVV1Q1W9auqJ8bGqeuS07nFV9a6q+sfp9qRp/mlTz41PVNU/VNVj7uV+njqts72qzp6mP15Vf1ZV95/WuaGqLqiqjyf5mSSPTvKhaRdXJPnpafppSd7W3Xd2978muS7JaRv6RG2SEY9Hd3+5u/8us2AHAAAAhjRUoJNZD5d/6+6Tu/txSd4/zf/vqSfGa5L83jTv95Nc3N0/kNmH+ddP8z+b5Ie7+wlJfiPJ76y/g6p6epKXJXnKNOsVSZ7c3acmWUvy0nWr/0d3n9rdb0tybWbhTTILFE6YpnckuWndNp+f5m0FIx4PAAAAGN62w9l4744Tdic5bzGlJEnO37H3pt37WX5Nkgur6oIk7+nuD1dVkrx1Wv7WJBdP009OctK0PEkeMPXmeGCSS6vqUUk6yTHr9n9Wkl1Jzu7u26rqx5KclOTvp/3cJ8lH1q3/9nXTP5fkD6rqlUkuS/KVg37UC3L6eZfvzoKPx5Xnn7N7P8sdDwAAAFiCwwp0Nlt3/3NVnZpZb43fqqoPfGPR+tWmv0clOb27v2VozfRFuHu6++lVtTPJB9ctvj7JIzIbrrOWpJJc0d3P3kdJX15X22eTnD3dx6OTPHVatDff2jvkYdO84Q16PAAAAGB4Qw25qqrvSvI/3f3mJK9Ocuq06Jnr/n6jx8bfJPmlddueMk0+MN8MVJ53t7u4MbPhQH9aVY9NcmWSJ637Hpj7TeHAvdX2ndPfozIbFvTH06LLkjyrqu5bVQ9P8qgkH5vjYa+sQY8HAAAADK+6+8BrrYjpJ79fneSuJF9N8otJ3pnZUJsfTXJnkmd393VVtT3Ja5N8X2Y9kT7U3b9QVWckuTSz3hzvTfKc7t5ZVc9Lsqu7X1hVT0jyliQ/nuR7klyQ5L5TGa/o7suq6oZp/S9Ntb0oyQumdd6d5OU9PblV9euZDQH6WpIXd/f7NuQJ2mQDH48bkjwgsyFbt2Y2pOvTG/AUAQAAwIYYKtC5N3f/IM9yOR4AAACw8YYacgUAAADAFuihAwAAAHCk0UMHAAAAYDACHQAAAIDBCHQAAAAABrNtnpW3b9/eO3fu3KBSAAAAAI48V1111Ze6+7h5tpkr0Nm5c2fW1tbmqwoAAACAfaqqG+fdxpArAAAAgMEIdAAAAAAGI9ABAAAAGIxABwAAAGAwAh0AAACAwQh0AAAAAAYj0AEAAAAYjEAHAAAAYDACHQAAAIDBCHQAAAAABiPQAQAAABiMQAcAAABgMAIdAAAAgMEIdAAAAAAGI9ABAAAAGIxABwAAAGAwAh0AAACAwQh0AAAAAAYj0AEAAAAYjEAHAAAAYDACHQAAAIDBCHQAAAAABiPQAQAAABiMQAcAAABgMAIdAAAAgMEIdAAAAAAGI9ABAAAAGIxABwAAAGAwAh0AAACAwcwV6Hz9llsWeue3XXjRQveXJJfsuW7h+1yEVa1rFW3F52orPibG53XJKtnf63EjrhfgUB3q69F7LnCk8z64eHMFOnctONC5/aKLF7q/JHnDB69f+D4XYVXrWkVb8bnaio+J8Xldskr293rciOsFOFSH+nr0ngsc6bwPLp4hVwAAAACDEegAAAAADGbbvBvs3XHCRtSxUKefd/myS+AwOYawObQ1RjHC9QcciPdcABZJDx0AAACAwQh0AAAAAAYz95CrHXtvWtidb1T36SvPP2dD9ns4dLGdzyoew8Ph+LOqtlpbY1wHep9c5PUHHI7DuX71ngscyXwmWjw9dAAAAAAGI9ABAAAAGIxABwAAAGAwcwU6Rx1//ELv/NiXvmSh+0uS55954sL3uQirWtcq2orP1VZ8TIzP65JVsr/X40ZcL8ChOtTXo/dc4EjnfXDxqrsPeuVdu3b12traBpYDAAAAcGSpqqu6e9c82xhyBQAAADAYgQ4AAADAYAQ6AAAAAIMR6AAAAAAMRqADAAAAMBiBDgAAAMBgBDoAAAAAgxHoAAAAAAxGoAMAAAAwGIEOAAAAwGAEOgAAAACDEegAAAAADEagAwAAADAYgQ4AAADAYAQ6AAAAAIMR6AAAAAAMRqADAAAAMBiBDgAAAMBgBDoAAAAAgxHoAAAAAAxGoAMAAAAwGIEOAAAAwGAEOgAAAACDEehsgtsuvGjZJQxvs55DxwruSbtg1Vyy57oh9gkAq8Z13dYi0NkEt1908bJLGN5mPYeOFdyTdsGqecMHrx9inwCwalzXbS0CHQAAAIDBCHQAAAAABiPQAQAAABjMtmUXcKTYu+OEZZfAQXKsAFbf6eddvuwSAGBIPu9sHXroAAAAAAxGoAMAAAAwGEOuNsmOvTctu4ShbWa3QMcKvpVuuayiK88/Z6H7M4QLgCOFzzsrqmruTfTQAQAAABiMQAcAAABgMAKdTXDsS1+y7BKGt1nPoWMF96RdsGqef+aJQ+wTAFaN67qtpbr7oFfetWtXr62tbWA5AAAAAEeWqrqqu3fNs40eOgAAAACDEegAAAAADEagAwAAADAYgQ4AAADAYAQ6AAAAAIMR6AAAAAAMRqADAAAAMBiBDgAAAMBgBDoAAAAAgxHoAAAAAAxGoAMAAAAwGIEOAAAAwGAEOgAAAACDEegAAAAADEagAwAAADAYgQ4AAADAYAQ6AAAAAIMR6AAAAAAMRqADAAAAMBiBDgAAAMBgBDoAAAAAgxHoAAAAAAxGoAMAAAAwGIEOAAAAwGAEOgAAAACDEegAAAAADEagAwAAADAYgQ4AAADAYAQ6AAAAAIOp7j74lav+PcmNG1cO3KvtSb607CKAQ6L9wpi0XRiT
tgvjekx3HzvPBtvmWbm7j5uvHjh8VbXW3buWXQcwP+0XxqTtwpi0XRhXVa3Nu40hVwAAAACDEegAAAAADEagwwj+ZNkFAIdM+4UxabswJm0XxjV3+53rS5EBAAAAWD49dAAAAAAGI9BhpVTVG6vqi1X1qXXzHlxVV1TVv0x/v2OZNQL3tI+2u7uq9lbV1dPtKcusEbinqjqhqvZU1aer6tqqetE037kXVtx+2q/zL6ywqvq2qvpYVX1yarvnT/MfXlUfrarrqurtVXWfA+1LoMOqeVOSc+8272VJPtDdj0rygel/YLW8Kfdsu0lycXefMt3+epNrAg7sa0l+ubtPSnJ6khdU1Ulx7oUR7Kv9Js6/sMruTHJWd5+c5JQk51bV6UkuyKztPjLJfyV5/oF2JNBhpXT3h5L8591mPy3JpdP0pUl+clOLAg5oH20XWHHdfXN3f3yavj3JZ5LsiHMvrLz9tF9ghfXMHdO/x0y3TnJWkndO8w/q3CvQYQTHd/fN0/QXkhy/zGKAubywqv5pGpJlyAassKrameQJST4a514Yyt3ab+L8Cyutqo6uqquTfDHJFUmuT3Jrd39tWuXzOYiAVqDDUHr2s2x+mg3G8EdJTsysK+nNSS5cbjnAvlTV/ZO8K8mLu/u29cuce2G13Uv7df6FFdfdX+/uU5I8LMlpSb73UPYj0GEEt1TVQ5Nk+vvFJdcDHITuvmU6Wd2V5JLMTlbAiqmqYzL7MPiW7n73NNu5FwZwb+3X+RfG0d23JtmT5IwkD6qqbdOihyXZe6DtBTqM4LIkz52mn5vkL5dYC3CQvvFhcPL0JJ/a17rAclRVJXlDks9090XrFjn3worbV/t1/oXVVlXHVdWDpulvT/IjmX0H1p4kz5hWO6hzb8160cJqqKq3JjkzyfYktyQ5L8lfJHlHku9OcmOSn+1uX74KK2QfbffMzLp7d5Ibkvz8uu/kAFZAVf1Qkg8nuSbJXdPsX8vsezice2GF7af9PjvOv7Cyqur7M/vS46Mz62Tzju7+zap6RJK3JXlwkk8keU5337nffQl0AAAAAMZiyBUAAADAYAQ6AAAAAIMR6AAAAAAMRqADAAAAMBiBDgAAAMBgBDoAwMqrqodU1dXT7QtVtXeavqOqXrfs+gAANpufLQcAhlJVu5Pc0d2/u+xaAACWRQ8dAGBYVXVmVb1nmt5dVZdW1Yer6saq+qmqelVVXVNV76+qY6b1nlhVf1tVV1XV5VX10OU+CgCA+Ql0AICt5MQkZyX5iSRvTrKnux+f5H+TPHUKdf4wyTO6+4lJ3pjkt5dVLADAodq27AIAABbofd391aq6JsnRSd4/zb8myc4kj0nyuCRXVFWmdW5eQp0AAIdFoAMAbCV3Jkl331VVX+1vflngXZld91SSa7v7jGUVCACwCIZcAQBHks8lOa6qzkiSqjqmqh675JoAAOYm0AEAjhjd/ZUkz0hyQVV9MsnVSX5wuVUBAMzPz5YDAAAADEYPHQAAAIDBCHQAAAAABiPQAQAAABiMQAcAAABgMAIdAAAAgMEIdAAAAAAGI9ABAAAAGIxABwAAAGAw/w9yi/xWuRzNKQAAAABJRU5ErkJggg==", + "text/plain": [ + "" ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "groundtruth_rttm, _ = google.colab.files.upload().popitem()\n", + "groundtruths = load_rttm(groundtruth_rttm)\n", + "if OWN_FILE['audio'] in groundtruths:\n", + " groundtruth = groundtruths[OWN_FILE['audio']]\n", + "else:\n", + " _, groundtruth = groundtruths.popitem()\n", + "groundtruth" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5MclWK2GYnp_" + }, + "source": [ + "# Speaker diarization with `pyannote.pipeline`\n", + "\n", + "We are about to run a full speaker diarization pipeline, that includes speaker segmentation, speaker embedding, and a final clustering step. 
**Brace yourself!**\n", + "\n", + "To load the speaker diarization pipeline, \n", + "\n", + "* accept the user conditions on [hf.co/pyannote/speaker-diarization](https://hf.co/pyannote/speaker-diarization)\n", + "* accept the user conditions on [hf.co/pyannote/segmentation](https://hf.co/pyannote/segmentation)\n", + "* login using `notebook_login` below" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 301, + "referenced_widgets": [ + "c8731777ce834e58a76a295076200cfc", + "859b12a6d95b4c6f987791ca848122b9", + "94756148d2e94a93ae233baba20af683", + "ba18cded436e486da34882d821d8f1eb", + "99898e6ee64a46bd832af112e79b58b7", + "79184c8c2a6f4b7493bb7f6983f18a09", + "ea95ffd922c0455d957120f034e541f8", + "13525aa369a9410a83343952ab511f3c", + "b2be65e192384c948fb8987d4cfca505", + "333b42ca7aa44788b1c22724eb11bcc3", + "0e382d66f09f4958a40baa7ab83c4ccb", + "6a45ce374e2e47ba9457d02e02522748", + "765485a1d3f941d28b79782dcffbf401", + "3499ef4dd9f243d9bef00b396e78ed69" + ] }, + "id": "r5u7VMb-YnqB", + "outputId": "c714a997-d4f8-417a-e5ad-0a4924333859", + "vscode": { + "languageId": "python" + } + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "L3FQXT5FYnp-" - }, - "source": [ - "This nice visualization is brought to you by [`pyannote.core`](http://pyannote.github.io/pyannote-core/) and basically indicates when each speaker speaks. " - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Login successful\n", + "Your token has been saved to /root/.huggingface/token\n" + ] + } + ], + "source": [ + "from huggingface_hub import notebook_login\n", + "notebook_login()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 273, + "referenced_widgets": [ + "183c55d5d3ce4058ae338c81344547c5", + "70efa83bf3ea45b4bd8cc41f57613328", + "338747810ac74b4e83e356a01459c8a5", + "ac0bcfa1ef6e4e78a7769c4cb2e8762f", + "6efb7939bb954dc8ba116680139eb257", + "6242493d251a47609c0c44f1dbe82958", + "f439c1de68ac4c799d81fdb29d053d10", + "e4c1e9affaba4045a3ec903091b6f454", + "1946386483ed4947a2184cdb4ea6e434", + "549a30c85c47466eadedbd24da42e304", + "bedc7d916b9745f097094c5c51a81f06", + "d12f07e25bf5422facc38c3463700994", + "eae11f84c2644ada8295b445c924baec", + "bcf766d2a2c641f0aa2af596c7da1b18", + "74bf69aa6eaa4a8594b2ea9a0fb20957", + "2d7a0b901d7044d5b1f273a3e9bea560", + "2cbf0faadd4842c8b22e10541ff9de4e", + "ab32c7daa1d9404fb921f39fbc4fc05c", + "ee537ee5470f4d7b816a8c8f96948b4d", + "652e97509a914f3b914665c4889c6d11", + "ebc9801e164a44b3b6f8dc7f590e1c79", + "0821b47ae70444dfa38b84719c4836a6", + "c3358d32ac814ea6bc5714402c5bc62d", + "ecd8e5e364d34ea8bfbba4fbd467384d", + "0125df9fa8e14b3db0e2bce299529812", + "e3169ca885e04536a709d5751173ce9a", + "70abdfd99be84f7b9b8d24fee9eec022", + "554e567a83b348f88092c6ba01830930", + "6e334cad2e94462cae6e722bd6f11a9e", + "407e250e244b4985b1ce8c9d32a8af7d", + "8127c4258e374ad986ce1f8b4c70f704", + "358c3a67f8b54c4c899e095611fa116b", + "e1c9df12fa034c93a9b3530ea4a7c5aa", + "404f7ce06a01470fbb0b747981d00e84", + "38b3054ad59549e4b4f2de4697139a87", + "7d90af87c9574f5ca21fca058c39bf02", + "fee75343289f42fb8d6dfb4bf26fe368", + "f21c0c6379d74898ac6aadcb6fc14a8a", + "0adb304bf90f4079a4031caea1cfb924", + "40021e0b59fe4e1e9bac351dbec57c6c", + "ed169fd606274f2ebbb3e8f32ab42431", + "304e9682570b4abeb1719001c04449d6", + "16c0017f65b649f5ac5bebf1c955a1fd", + "5e2c207db5424f91829bf5c52040a9f2", + 
"8011d68253ac4080a637659ef3383dc4", + "e928540e99564d808cb2d12c92daa498", + "fc9a3c4ae0a947ec91a227360a80f602", + "f91dcd9f30c743d69f9d4b7e8d1beba5", + "6ede83f870a24e71b5182fcc458cdc42", + "c9974003727a401797953ef2885db5a2", + "77a361d1ff214e8799891bbeb28a0789", + "27f6f437c5264368bc2c679942ad1e53", + "e7728d9c55e44274966f8f6dbc445c54", + "2b2d7912186a49dd9891ae12c77482c7", + "1600b9cd09c446e581b7912e35c9f56e", + "28004251b0e44a6c9dfa7ce1b30dcb18", + "e98cf7a63c814ffd94f69928f0700ebf", + "6a4dee55cbae4959bd7fe3c4d92242b1", + "8dba487876124827919079519406ecb8", + "5c211704f90946afbae2f66a7586ce70", + "aba21021d3bb4565a58ffa40049810db", + "f7812fa7fbf744c1b261b985d085e28e", + "d7071582bfbe4ec4b2c3c9843e5481ae", + "0d80273cabbc42ba9a408fb1144151c9", + "67fcc38a1e5d4eb39381685447e397de", + "0b4bf8076fdf4d19843a3246c8bd61ac", + "d182e37b4a404158bee8446fc2728bd9", + "603e99f45afb4910a99f7684ffd21b6a", + "d13ba6030aff42bca48c72ff071c44c0", + "a899f4bc6ed842d397723cca582669e6", + "a02030ba8f324d93a7ed6cc793d70a3b", + "b26354d0278f447d92c7e1ad4c211d64", + "3bd33a372aad4c438f64d73c97f14c6a", + "c8e0c9a60ef34d2caee9d55a3c21c3d4", + "764aa53d75324d73ab06936c52fd8fc8", + "341615c971b04033b7293d82fc40f35c", + "17856a72e4e948039a66c51e8244cb50", + "41eb32a6fef141ff9cc3ce6e4d771822", + "0d10fb0edc9144b1a1fc1f2c9e322410", + "32accb0adfa24c62a75c15c8ec88df8c", + "bf299285318b4a04a88569cc581ecd75", + "ac2950d08fc145ba9eb9cf5824b1ee18", + "d33fba0d78fb41f983c55f5cd2a0a740", + "fd47487fc8734594823f8afa00c4239d", + "23d4e25ec6c541818d5927b69576d278", + "54d9456703324160aced03ee5fef2943", + "bacfb50c001047c4824a05c9f2ee2e40", + "c53a1cf68fcd4388abf1f0379891089a" + ] }, + "id": "lUq1UvoJYnqB", + "outputId": "8c052808-d0b2-4f2e-8771-f86114ae3fe3", + "vscode": { + "languageId": "python" + } + }, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 62 - }, - "id": "rDhZ3bXEYnp-", - "outputId": "a82efe4e-2f9c-48bd-94fb-c62af3a3cb43", - "vscode": { - "languageId": "python" - } + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "183c55d5d3ce4058ae338c81344547c5", + "version_major": 2, + "version_minor": 0 }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from pyannote.audio import Audio \n", - "from IPython.display import Audio as IPythonAudio\n", - "waveform, sr = Audio(mono=\"downmix\").crop(DEMO_FILE, EXCERPT)\n", - "IPythonAudio(waveform.flatten(), rate=sr)" + "text/plain": [ + "Downloading: 0%| | 0.00/500 [00:00\n", - " \n", - " Upload widget is only available when the cell has been executed in the\n", - " current browser session. 
Please rerun this cell to enable.\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Saving sample.wav to sample.wav\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import google.colab\n", - "own_file, _ = google.colab.files.upload().popitem()\n", - "OWN_FILE = {'audio': own_file}\n", - "notebook.reset()\n", - "\n", - "# load audio waveform and play it\n", - "waveform, sample_rate = Audio(mono=\"downmix\")(OWN_FILE)\n", - "IPythonAudio(data=waveform.squeeze(), rate=sample_rate, autoplay=True)" + "text/plain": [ + "Downloading: 0%| | 0.00/1.92k [00:00 {speaker_name} \n", - "```" + "text/plain": [ + "Downloading: 0%| | 0.00/1.92k [00:00\n", - " \n", - " Upload widget is only available when the cell has been executed in the\n", - " current browser session. Please rerun this cell to enable.\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Saving sample.rttm to sample.rttm\n" - ] - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAABHQAAACsCAYAAAAaLvvnAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAOHUlEQVR4nO3de6ykZ10H8O+v3YIGCghbG1yqC+WiBWwpa9OKJk2DbQUVURRISCDyhxowXNQEFOzWqEnBtl4AjQVCDYSLgFpBqA1ZBJWCp1As5aJtbFPWUkStbVHLpT//mJdwaLuX2Z1zZp6zn08yOe95b/ObeeeZ951vnmemujsAAAAAjOOoZRcAAAAAwHwEOgAAAACDEegAAAAADEagAwAAADAYgQ4AAADAYAQ6AAAAAIMR6AAAAAAMRqADAAAAMBiBDgAAAMBgBDoAAAAAgxHoTKrqeVX1msPY/uSq+khVXVNVf1VVD1i37OVVdV1Vfa6qzllMxVvbRh2PqnpIVe2pqjsOZ/8AAACwTAKdBaiqo5O8PsnLuvvxSf48ya9Oy05K8qwkj01ybpLXTeuzQfZ3PJL8X5JXJvmVJZUHAAAAh22oQKeq7ldV762qT1bVp6rqmVV1Q1W9auqJ8bGqeuS07nFV9a6q+sfp9qRp/mlTz41PVNU/VNVj7uV+njqts72qzp6mP15Vf1ZV95/WuaGqLqiqjyf5mSSPTvKhaRdXJPnpafppSd7W3Xd2978muS7JaRv6RG2SEY9Hd3+5u/8us2AHAAAAhjRUoJNZD5d/6+6Tu/txSd4/zf/vqSfGa5L83jTv95Nc3N0/kNmH+ddP8z+b5Ie7+wlJfiPJ76y/g6p6epKXJXnKNOsVSZ7c3acmWUvy0nWr/0d3n9rdb0tybWbhTTILFE6YpnckuWndNp+f5m0FIx4PAAAAGN62w9l4744Tdic5bzGlJEnO37H3pt37WX5Nkgur6oIk7+nuD1dVkrx1Wv7WJBdP009OctK0PEkeMPXmeGCSS6vqUUk6yTHr9n9Wkl1Jzu7u26rqx5KclOTvp/3cJ8lH1q3/9nXTP5fkD6rqlUkuS/KVg37UC3L6eZfvzoKPx5Xnn7N7P8sdDwAAAFiCwwp0Nlt3/3NVnZpZb43fqqoPfGPR+tWmv0clOb27v2VozfRFuHu6++lVtTPJB9ctvj7JIzIbrrOWpJJc0d3P3kdJX15X22eTnD3dx6OTPHVatDff2jvkYdO84Q16PAAAAGB4Qw25qqrvSvI/3f3mJK9Ocuq06Jnr/n6jx8bfJPmlddueMk0+MN8MVJ53t7u4MbPhQH9aVY9NcmWSJ637Hpj7TeHAvdX2ndPfozIbFvTH06LLkjyrqu5bVQ9P8qgkH5vjYa+sQY8HAAAADK+6+8BrrYjpJ79fneSuJF9N8otJ3pnZUJsfTXJnkmd393VVtT3Ja5N8X2Y9kT7U3b9QVWckuTSz3hzvTfKc7t5ZVc9Lsqu7X1hVT0jyliQ/nuR7klyQ5L5TGa/o7suq6oZp/S9Ntb0oyQumdd6d5OU9PblV9euZDQH6WpIXd/f7NuQJ2mQDH48bkjwgsyFbt2Y2pOvTG/AUAQAAwIYYKtC5N3f/IM9yOR4AAACw8YYacgUAAADAFuihAwAAAHCk0UMHAAAAYDACHQAAAIDBCHQAAAAABrNtnpW3b9/eO3fu3KBSAAAAAI48V1111Ze6+7h5tpkr0Nm5c2fW1tbmqwoAAACAfaqqG+fdxpArAAAAgMEIdAAAAAAGI9ABAAAAGIxABwAAAGAwAh0AAACAwQh0AAAAAAYj0AEAAAAYjEAHAAAAYDACHQAAAIDBCHQAAAAABiPQAQAAABiMQAcAAABgMAIdAAAAgMEIdAAAAAAGI9ABAAAAGIxABwAAAGAwAh0AAACAwQh0AAAAAAYj0AEAAAAYjEAHAAAAYDACHQAAAIDBCHQAAAAABiPQAQAAABiMQAcAAABgMAIdAAAAgMEIdAAAAAAGI9ABAAAAGIxABwAAAGAwAh0AAACAwcwV6Hz9llsWeue3XXjRQveXJJfsuW7h+1yEVa1rFW3F52orPibG53XJKtnf63EjrhfgUB3q69F7LnCk8z64eHMFOnctONC5/aKLF7q/JHnDB69f+D4XYVXrWkVb8bnaio+J8Xldskr293rciOsFOFSH+nr0ngsc6bwPLp
4hVwAAAACDEegAAAAADGbbvBvs3XHCRtSxUKefd/myS+AwOYawObQ1RjHC9QcciPdcABZJDx0AAACAwQh0AAAAAAYz95CrHXtvWtidb1T36SvPP2dD9ns4dLGdzyoew8Ph+LOqtlpbY1wHep9c5PUHHI7DuX71ngscyXwmWjw9dAAAAAAGI9ABAAAAGIxABwAAAGAwcwU6Rx1//ELv/NiXvmSh+0uS55954sL3uQirWtcq2orP1VZ8TIzP65JVsr/X40ZcL8ChOtTXo/dc4EjnfXDxqrsPeuVdu3b12traBpYDAAAAcGSpqqu6e9c82xhyBQAAADAYgQ4AAADAYAQ6AAAAAIMR6AAAAAAMRqADAAAAMBiBDgAAAMBgBDoAAAAAgxHoAAAAAAxGoAMAAAAwGIEOAAAAwGAEOgAAAACDEegAAAAADEagAwAAADAYgQ4AAADAYAQ6AAAAAIMR6AAAAAAMRqADAAAAMBiBDgAAAMBgBDoAAAAAgxHoAAAAAAxGoAMAAAAwGIEOAAAAwGAEOgAAAACDEehsgtsuvGjZJQxvs55DxwruSbtg1Vyy57oh9gkAq8Z13dYi0NkEt1908bJLGN5mPYeOFdyTdsGqecMHrx9inwCwalzXbS0CHQAAAIDBCHQAAAAABiPQAQAAABjMtmUXcKTYu+OEZZfAQXKsAFbf6eddvuwSAGBIPu9sHXroAAAAAAxGoAMAAAAwGEOuNsmOvTctu4ShbWa3QMcKvpVuuayiK88/Z6H7M4QLgCOFzzsrqmruTfTQAQAAABiMQAcAAABgMAKdTXDsS1+y7BKGt1nPoWMF96RdsGqef+aJQ+wTAFaN67qtpbr7oFfetWtXr62tbWA5AAAAAEeWqrqqu3fNs40eOgAAAACDEegAAAAADEagAwAAADAYgQ4AAADAYAQ6AAAAAIMR6AAAAAAMRqADAAAAMBiBDgAAAMBgBDoAAAAAgxHoAAAAAAxGoAMAAAAwGIEOAAAAwGAEOgAAAACDEegAAAAADEagAwAAADAYgQ4AAADAYAQ6AAAAAIMR6AAAAAAMRqADAAAAMBiBDgAAAMBgBDoAAAAAgxHoAAAAAAxGoAMAAAAwGIEOAAAAwGAEOgAAAACDEegAAAAADEagAwAAADAYgQ4AAADAYAQ6AAAAAIOp7j74lav+PcmNG1cO3KvtSb607CKAQ6L9wpi0XRiTtgvjekx3HzvPBtvmWbm7j5uvHjh8VbXW3buWXQcwP+0XxqTtwpi0XRhXVa3Nu40hVwAAAACDEegAAAAADEagwwj+ZNkFAIdM+4UxabswJm0XxjV3+53rS5EBAAAAWD49dAAAAAAGI9BhpVTVG6vqi1X1qXXzHlxVV1TVv0x/v2OZNQL3tI+2u7uq9lbV1dPtKcusEbinqjqhqvZU1aer6tqqetE037kXVtx+2q/zL6ywqvq2qvpYVX1yarvnT/MfXlUfrarrqurtVXWfA+1LoMOqeVOSc+8272VJPtDdj0rygel/YLW8Kfdsu0lycXefMt3+epNrAg7sa0l+ubtPSnJ6khdU1Ulx7oUR7Kv9Js6/sMruTHJWd5+c5JQk51bV6UkuyKztPjLJfyV5/oF2JNBhpXT3h5L8591mPy3JpdP0pUl+clOLAg5oH20XWHHdfXN3f3yavj3JZ5LsiHMvrLz9tF9ghfXMHdO/x0y3TnJWkndO8w/q3CvQYQTHd/fN0/QXkhy/zGKAubywqv5pGpJlyAassKrameQJST4a514Yyt3ab+L8Cyutqo6uqquTfDHJFUmuT3Jrd39tWuXzOYiAVqDDUHr2s2x+mg3G8EdJTsysK+nNSS5cbjnAvlTV/ZO8K8mLu/u29cuce2G13Uv7df6FFdfdX+/uU5I8LMlpSb73UPYj0GEEt1TVQ5Nk+vvFJdcDHITuvmU6Wd2V5JLMTlbAiqmqYzL7MPiW7n73NNu5FwZwb+3X+RfG0d23JtmT5IwkD6qqbdOihyXZe6DtBTqM4LIkz52mn5vkL5dYC3CQvvFhcPL0JJ/a17rAclRVJXlDks9090XrFjn3worbV/t1/oXVVlXHVdWDpulvT/IjmX0H1p4kz5hWO6hzb8160cJqqKq3JjkzyfYktyQ5L8lfJHlHku9OcmOSn+1uX74KK2QfbffMzLp7d5Ibkvz8uu/kAFZAVf1Qkg8nuSbJXdPsX8vsezice2GF7af9PjvOv7Cyqur7M/vS46Mz62Tzju7+zap6RJK3JXlwkk8keU5337nffQl0AAAAAMZiyBUAAADAYAQ6AAAAAIMR6AAAAAAMRqADAAAAMBiBDgAAAMBgBDoAwMqrqodU1dXT7QtVtXeavqOqXrfs+gAANpufLQcAhlJVu5Pc0d2/u+xaAACWRQ8dAGBYVXVmVb1nmt5dVZdW1Yer6saq+qmqelVVXVNV76+qY6b1nlhVf1tVV1XV5VX10OU+CgCA+Ql0AICt5MQkZyX5iSRvTrKnux+f5H+TPHUKdf4wyTO6+4lJ3pjkt5dVLADAodq27AIAABbofd391aq6JsnRSd4/zb8myc4kj0nyuCRXVFWmdW5eQp0AAIdFoAMAbCV3Jkl331VVX+1vflngXZld91SSa7v7jGUVCACwCIZcAQBHks8lOa6qzkiSqjqmqh675JoAAOYm0AEAjhjd/ZUkz0hyQVV9MsnVSX5wuVUBAMzPz5YDAAAADEYPHQAAAIDBCHQAAAAABiPQAQAAABiMQAcAAABgMAIdAAAAgMEIdAAAAAAGI9ABAAAAGIxABwAAAGAw/w9yi/xWuRzNKQAAAABJRU5ErkJggg==", - "text/plain": [ - "" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "groundtruth_rttm, _ = google.colab.files.upload().popitem()\n", - "groundtruths = load_rttm(groundtruth_rttm)\n", - "if OWN_FILE['audio'] in groundtruths:\n", - " groundtruth = groundtruths[OWN_FILE['audio']]\n", - "else:\n", - " _, groundtruth = groundtruths.popitem()\n", - "groundtruth" + "text/plain": [ + "Downloading: 0%| | 0.00/5.53M [00:00" ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "diarization" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DLhErS6wYnqB" + }, + "source": [ + "# Evaluation with 
`pyannote.metrics`\n", + "\n", + "Because groundtruth is available, we can evaluate the quality of the diarization pipeline by computing the [diarization error rate](http://pyannote.github.io/pyannote-metrics/reference.html#diarization)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "vNHQRTUIYnqB", + "vscode": { + "languageId": "python" + } + }, + "outputs": [], + "source": [ + "from pyannote.metrics.diarization import DiarizationErrorRate\n", + "metric = DiarizationErrorRate()\n", + "der = metric(groundtruth, diarization)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "9d0vKQ0fYnqB", + "outputId": "9a664753-cd84-4211-9153-d33e929bb252", + "vscode": { + "languageId": "python" + } + }, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 273, - "referenced_widgets": [ - "183c55d5d3ce4058ae338c81344547c5", - "70efa83bf3ea45b4bd8cc41f57613328", - "338747810ac74b4e83e356a01459c8a5", - "ac0bcfa1ef6e4e78a7769c4cb2e8762f", - "6efb7939bb954dc8ba116680139eb257", - "6242493d251a47609c0c44f1dbe82958", - "f439c1de68ac4c799d81fdb29d053d10", - "e4c1e9affaba4045a3ec903091b6f454", - "1946386483ed4947a2184cdb4ea6e434", - "549a30c85c47466eadedbd24da42e304", - "bedc7d916b9745f097094c5c51a81f06", - "d12f07e25bf5422facc38c3463700994", - "eae11f84c2644ada8295b445c924baec", - "bcf766d2a2c641f0aa2af596c7da1b18", - "74bf69aa6eaa4a8594b2ea9a0fb20957", - "2d7a0b901d7044d5b1f273a3e9bea560", - "2cbf0faadd4842c8b22e10541ff9de4e", - "ab32c7daa1d9404fb921f39fbc4fc05c", - "ee537ee5470f4d7b816a8c8f96948b4d", - "652e97509a914f3b914665c4889c6d11", - "ebc9801e164a44b3b6f8dc7f590e1c79", - "0821b47ae70444dfa38b84719c4836a6", - "c3358d32ac814ea6bc5714402c5bc62d", - "ecd8e5e364d34ea8bfbba4fbd467384d", - "0125df9fa8e14b3db0e2bce299529812", - "e3169ca885e04536a709d5751173ce9a", - "70abdfd99be84f7b9b8d24fee9eec022", - "554e567a83b348f88092c6ba01830930", - "6e334cad2e94462cae6e722bd6f11a9e", - "407e250e244b4985b1ce8c9d32a8af7d", - "8127c4258e374ad986ce1f8b4c70f704", - "358c3a67f8b54c4c899e095611fa116b", - "e1c9df12fa034c93a9b3530ea4a7c5aa", - "404f7ce06a01470fbb0b747981d00e84", - "38b3054ad59549e4b4f2de4697139a87", - "7d90af87c9574f5ca21fca058c39bf02", - "fee75343289f42fb8d6dfb4bf26fe368", - "f21c0c6379d74898ac6aadcb6fc14a8a", - "0adb304bf90f4079a4031caea1cfb924", - "40021e0b59fe4e1e9bac351dbec57c6c", - "ed169fd606274f2ebbb3e8f32ab42431", - "304e9682570b4abeb1719001c04449d6", - "16c0017f65b649f5ac5bebf1c955a1fd", - "5e2c207db5424f91829bf5c52040a9f2", - "8011d68253ac4080a637659ef3383dc4", - "e928540e99564d808cb2d12c92daa498", - "fc9a3c4ae0a947ec91a227360a80f602", - "f91dcd9f30c743d69f9d4b7e8d1beba5", - "6ede83f870a24e71b5182fcc458cdc42", - "c9974003727a401797953ef2885db5a2", - "77a361d1ff214e8799891bbeb28a0789", - "27f6f437c5264368bc2c679942ad1e53", - "e7728d9c55e44274966f8f6dbc445c54", - "2b2d7912186a49dd9891ae12c77482c7", - "1600b9cd09c446e581b7912e35c9f56e", - "28004251b0e44a6c9dfa7ce1b30dcb18", - "e98cf7a63c814ffd94f69928f0700ebf", - "6a4dee55cbae4959bd7fe3c4d92242b1", - "8dba487876124827919079519406ecb8", - "5c211704f90946afbae2f66a7586ce70", - "aba21021d3bb4565a58ffa40049810db", - "f7812fa7fbf744c1b261b985d085e28e", - "d7071582bfbe4ec4b2c3c9843e5481ae", - "0d80273cabbc42ba9a408fb1144151c9", - "67fcc38a1e5d4eb39381685447e397de", - "0b4bf8076fdf4d19843a3246c8bd61ac", - 
"d182e37b4a404158bee8446fc2728bd9", - "603e99f45afb4910a99f7684ffd21b6a", - "d13ba6030aff42bca48c72ff071c44c0", - "a899f4bc6ed842d397723cca582669e6", - "a02030ba8f324d93a7ed6cc793d70a3b", - "b26354d0278f447d92c7e1ad4c211d64", - "3bd33a372aad4c438f64d73c97f14c6a", - "c8e0c9a60ef34d2caee9d55a3c21c3d4", - "764aa53d75324d73ab06936c52fd8fc8", - "341615c971b04033b7293d82fc40f35c", - "17856a72e4e948039a66c51e8244cb50", - "41eb32a6fef141ff9cc3ce6e4d771822", - "0d10fb0edc9144b1a1fc1f2c9e322410", - "32accb0adfa24c62a75c15c8ec88df8c", - "bf299285318b4a04a88569cc581ecd75", - "ac2950d08fc145ba9eb9cf5824b1ee18", - "d33fba0d78fb41f983c55f5cd2a0a740", - "fd47487fc8734594823f8afa00c4239d", - "23d4e25ec6c541818d5927b69576d278", - "54d9456703324160aced03ee5fef2943", - "bacfb50c001047c4824a05c9f2ee2e40", - "c53a1cf68fcd4388abf1f0379891089a" - ] - }, - "id": "lUq1UvoJYnqB", - "outputId": "8c052808-d0b2-4f2e-8771-f86114ae3fe3", - "vscode": { - "languageId": "python" - } - }, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "183c55d5d3ce4058ae338c81344547c5", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Downloading: 0%| | 0.00/500 [00:00" ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "mapping = metric.optimal_mapping(groundtruth, diarization)\n", + "diarization.rename_labels(mapping=mapping)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 230 }, + "id": "Z0ewsLlQYnqB", + "outputId": "8a8cd040-ee1d-48f7-d4be-eef9e08e9e55", + "vscode": { + "languageId": "python" + } + }, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 230 - }, - "id": "DPosdyGrYnqB", - "outputId": "45a2315e-6841-4de4-e54e-1f3da7cf2d46", - "vscode": { - "languageId": "python" - } - }, - "outputs": [ - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nO3df3DV1Z0//teFhB8SEhAkAQXUFUUsKmpXqW61iIAylUW+2rKV+qOFkVKd2l3LtIM/2n501Y7D2rWtPxYtXbrWoUVbdV0Fiy4WhIJSCjqoLEpbCSjILyWC5P39o+XWNEASyCX3hMdj5k7gvs8959ybc1/vd97P5H1zWZZlAQAAAAAAkLA2LT0BAAAAAACAAyXwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifw2It33nknJk6cGH369In27dtHVVVVDB8+PH7zm99ERMTRRx8duVwucrlcdOrUKU477bSYOXNm/vG33HJLfvvHb/3796831sMPPxxt27aNSZMm1dv23HPPRS6Xi02bNuXve/vtt2PgwIHx6U9/OjZv3pxvs6dbdXV1vfm0bds2evfuHRMmTIiNGzc2+jWpqamJSZMmRbdu3aKsrCzGjBkT69atq9NmzZo1MXLkyDjssMOiR48eccMNN8RHH33U6DEOJdZYfY1ZY9ddd12cfvrp0b59+zj11FMb3fehyjqrr6F19rvf/S7Gjh0bvXv3jo4dO8aJJ54Yd999d6P7BwAAAKBllLTIqO+/c3DH63REkx8yZsyY2LFjR0yfPj2OPfbYWLduXTz77LOxYcOGfJvvfOc7MX78+NiyZUvcdddd8bnPfS6OPPLI+NSnPhURESeddFLMmTOnTr8lJfVf8mnTpsU3vvGNuO++++Kuu+6KDh067HVeq1atigsuuCAGDBgQM2fOjI4dO+a3rVy5MsrLy+u079GjR/7fu+eza9euePXVV+Pqq6+OzZs3xyOPPNKo1+T666+PJ598MmbOnBkVFRXx1a9+NS655JL8idNdu3bFyJEjo6qqKubPnx9r166NL37xi1FaWhq33XZbo8ZoXu8dxLG6NvkR1lh9Da2x3a6++upYuHBhLFu2rFH9FlLNlsafaD9QHcoPb/JjrLP6GlpnS5YsiR49esSMGTOid+/eMX/+/JgwYUK0bds2vvrVrzZqDAAAAAAOvpYJPL7Xo+E2zemWrEnNN23aFPPmzYvnnnsuzj333IiI6Nu3b/z93/99nXadO3eOqqqqqKqqih/84AcxY8aMePzxx/MnCUtKSqKqqmqfY61evTrmz58fv/jFL2Lu3Lkxa9as+Kd/+qc9tl22bFkMHz48hgwZEtOnT693wrFHjx7RpUuXvY718fkceeSRcemll8ZDDz207xfjLzZv3hzTpk2L//qv/4ohQ4ZERMRDDz0UJ554Yrz44otx1llnxTPPPBOvvPJKzJkzJyorK+PUU0+N7373uzF58uS45ZZbol27do0aq/lccBDHWtyk1tZYfY1ZYxER3//+9yPiz3+5UAyBx6Pjzz5oY4195NUmtbfO6mvMOrv66qvrPObYY4+NBQsWxKxZswQeAAAAAEXMJa32oKysLMrKyuKxxx6LDz/8sFGPKSkpidLS0tixY0eTxnrooYdi5MiRUVFREZdffnlMmzZtj+3mz58f5557bowZMyZmzJixx9+uboo333wznn766UaHEEuWLImdO3fG0KFD8/f1798/+vTpEwsWLIiIiAULFsTAgQOjsrIy32b48OGxZcuWWLFixQHNt7WxxuprzBqjaayz+vZ3nW3evDkOP7zpf2EDAAAAwMEj8NiDkpKS+PGPfxzTp0+PLl26xNlnnx3f+ta39vrb5Dt27Ih//dd/jc2bN+d/Yzgi4ve//33+hOPu2zXXXJPfXltbGz/+8Y/j8ssvj4iIz3/+8/HCCy/E6tWr640xevTo+OxnPxv33HNP5HK5Pc7jqKOOqjPWSSedVGf77vl07NgxjjnmmFixYkVMnjy5Ua9JdXV1tGvXrt5vXVdWVuavrV9dXV0n7Ni9ffc2/soaq68xa4ymsc7q2591Nn/+/HjkkUdiwoQJjRoDAAAAgJbRMpe0SsCYMWNi5MiRMW/evHjxxRfjqaeeijvvvDP+4z/+I6688sqIiJg8eXJMmTIlampqoqysLG6//fYYOXJkvo8TTjghfvWrX9Xp9+PXpZ89e3a8//77cdFFF0VERPfu3eOCCy6IBx98ML773e/WedyoUaPi0UcfjXnz5sU//MM/7HHO8+bNi86dO+f/X1paWmf77vnU1NTEjBkzYunSpXHttdc2/cWhWVhjHAzW2YFZvnx5jBo1Km6++eYYNmxYQcYAAAAAoHm0TOBxw/oWGbapOnToEBdccEFccMEFceONN8aXv/zluPnmm/MnCW+44Ya48soro6ysLCorK+v9tnK7du3iuOOO22v/06ZNi40bN9b5sN7a2tpYtmxZfPvb3442bf76Bzj33XdffOMb34gLL7ww/vu//zs+/elP1+vvmGOO2ed17z8+n90nNL/97W/XOyG5J1VVVbFjx47YtGlTnTHWrVuXv5Z+VVVVLFq0qM7j1q1bl9928M1ugTGbxhr7q8assWI0+oHfNNyohVlnf9WUdfbKK6/E+eefHxMmTIgpU6Y02DcAAAAALatlAo9OR7TIsAdqwIAB8dhjj+X/3717932eBNyXDRs2xC9/+cv42c9+VudyLbt27YpzzjknnnnmmRgxYkT+/lwuF/fff3+0adMmLrroonjyySfzH0K8v6ZMmRJDhgyJiRMnRq9evfbZ9vTTT4/S0tJ49tlnY8yYMRERsXLlylizZk0MHjw4IiIGDx4ct956a6xfvz569PjzB9PPnj07ysvLY8CAAQc01/3TtQXGPDDW2L7XWDHqUJ7e5zpYZw2vsxUrVsSQIUPiiiuuiFtvvfWA5gcAAADAweGSVnuwYcOGuPTSS+Pqq6+Ok08+OTp37hyLFy+OO++8M0aNGtXofj766KN614TP5XJRWVkZ//mf/xndunWLyy67rN5vU1900UUxbdq0OicJdz/23nvvjbZt2+ZPFJ533nn57evXr4+ampo6j+nWrVu9y8HsNnjw4Dj55JPjtttui3vuuWefz6WioiK+9KUvxde//vU4/PDDo7y8PK699toYPHhwnHXWWRERMWzYsBgwYECMGzcu7rzzzqiuro4pU6bEpEmTon379vvs/1BjjdXXmDUWEfHGG2/Etm3borq6OrZv3x5Lly6NiD+fxG/sB1cfKqyz+hqzzpYvXx5DhgyJ4cOHx9e//vX8c2/btm0ccUSagT0AAADAoUDgsQdlZWVx5plnxtSpU2PVqlWxc+fO6N27d4wfPz6+9a1vNbqfFS
tWRM+ePevc1759+6ipqYkHH3wwRo8evccP7R0zZkyMGzcu3n333Xrbcrlc/OAHP4g2bdrEyJEj44knnsj3ccIJJ9Rrv2DBgjoni//W9ddfH1deeWVMnjw5evfuvc/nM3Xq1GjTpk2MGTMmPvzwwxg+fHj88Ic/zG9v27ZtPPHEEzFx4sQYPHhwdOrUKa644or4zne+s89+D0XW2J41tMYiIr785S/H888/n///oEGDIiJi9erVcfTRR++z/0ONdbZnDa2zn//85/HOO+/EjBkzYsaMGfn7+/btG2+++eY++wYAAACg5eSyLMtaehIAAAAAAAAHok3DTQAAAAAAAIqbwIOIiPjpT38aZWVle7x9/IOIYX9ZYxwM1hkAAADAocslrYiIiK1bt8a6dev2uK20tDT69u17kGdEa2ONcTBYZwAAAACHLoEHAAAAAACQPJe0AgAAAAAAkifwAAAAAAAAkldSqI5ra2vj7bffjs6dO0culyvUMAAAAAAAQAKyLIutW7dGr169ok2b5v97jIIFHm+//Xb07t27UN0DAAAAAAAJ+sMf/hBHHXVUs/dbsMCjc+fOEfHniZeXlxdqGAAAAAAAIAFbtmyJ3r175/OD5lawwGP3ZazKy8sFHgAAAAAAQEREwT4Gw4eWAwAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAySt84LG1uuBDQNHYujZi7i1//rrf3o2I+/7yFYqBNUkr1Jh63Sw1vYhsXRsx96sRb4yJ2LZiHw2952kpxb72in1+rdHBeM2L/fta7PMDONQUa10u1nkdqNb6vCikwgce2wQeHEK2ro14/tvNEHg8EIo5xcOapBVqTL1ulppeRLaujXjtwYjj3orY/to+GnrP01KKfe0V+/xao4Pxmhf797XY5wdwqCnWulys8zpQrfV5UUguaQUAAAAAACRP4AEAAAAAACRP4AEAAAAAACSvpOAj1GyKeP+dgg8DRaHmvWbsbEtENGd/sL+2tPQEoHBq3tv7cUqz1vQik/sg9r6P8Z6npRXrMZD3Rssp5JpI5ftarO8LgENNse83Wtv+othfb4pR4QOPn42KaF/wUaAV+kpLTwCg9fvJ0JaeQcvoPjUiprb0LGAvHAPxt6wJrwEAjWN/AS5pBQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJK/wn+Hx+V9GHDe44MNAUVi3rBmvB//DiOjXTH3BgXg9XAeUVuuLcyIqT97ztmat6UXm3esjul+0l43e87S0Yj0G8t5oOYVcE6l8X4v1fQFwqCn2/UZr218U++tNMSp84NGhS0SnIwo+DBSFDl2bsbPyiGjO/mB/lbf0BKBwOnTd+3FKs9b0IpMdFnvfx3jP09KK9RjIe6PlFHJNpPJ9Ldb3BcChptj3G61tf1HsrzfFyCWtAAAAAACA5Ak8AAAAAACA5Ak8AAAAAACA5Ak8AAAAAACA5BU+8CirKvgQUDQ694w49+Y/f91v3SNi/F++QjGwJmmFGlOvm6WmF5HOPSOOvzrijb4RHY/fR0PveVpKsa+9Yp9fa3QwXvNi/74W+/wADjXFWpeLdV4HqrU+Lwopl2VZVoiOt2zZEhUVFbF58+YoLy8vxBAAAAAAAEAiCp0buKQVAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIFHst6NiPv+8rU1jAPsr+3vrY/fz7wntr+3vqWnAkArlvr+JvX5U5ysKwDYP/ahh67t771T0P4FHsl6NyIeiIMTeByMcYD9tf29d2L5z39Q8B0GAIe21Pc3qc+f4mRdAcD+sQ89dG3fVNjzzAIPAAAAAAAgeQIPAAAAAAAgeSUtPQEO1JaIeK/A/QMp2PH+lqjZsrGlpwFAK7Xj/dZxXGh/SXNqLe8LAGgpjs0OPTs/2FrQ/gUeyftKS08AKBJz/9/VLT0FACh69pcAAMXDsdmh54Oduwrav0taAQAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyfMZHsn7YUT0K2D/r4fPCYE0fGbKg9Gl7wktPQ0AWqlNb61sFddYtr+kObWW9wUAtBTHZoeeP6x4KeLRCwrWv8AjeeUR0bXA/QMpaNepPDqUH97S0wCglWrXqXUcF9pf0pxay/sCAFqKY7NDT+lhnQvav0taAQAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAyRN4JKt7RIz/y9fWMA6wvzp2PSI+8f9Nio5dj2jpqQDQiqW+v0l9/hQn6woA9o996KGrY5fCnmfOZVmWFaLjLVu2REVFRWzevDnKy8sLMQQAAAAAAJCIQucG/sIDAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABIn
sADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABIXkmhOs6yLCIitmzZUqghAAAAAACAROzOC3bnB82tYIHHhg0bIiKid+/ehRoCAAAAAABIzIYNG6KioqLZ+y1Y4HH44YdHRMSaNWsKMnEgDVu2bInevXvHH/7whygvL2/p6QAtQB0A1AEgQi0A1AEgYvPmzdGnT598ftDcChZ4tGnz548HqaioUMCAKC8vVwvgEKcOAOoAEKEWAOoA8Nf8oNn7LUivAAAAAAAAB5HAAwAAAAAASF7bW2655ZaCdd62bZx33nlRUlKwK2cBCVALAHUAUAeACLUAUAeAwtaBXJZlWbP3CgAAAAAAcBC5pBUAAAAAAJA8gQcAAAAAAJA8gQcAAAAAAJA8gQcAAAAAAJC8Jgcef/rTn+Lyyy+Pbt26RceOHWPgwIGxePHi/PYsy+Kmm26Knj17RseOHWPo0KHx+uuv1+lj48aN8YUvfCHKy8ujS5cu8aUvfSm2bdt24M8GOCgaqgOzZs2KYcOGRbdu3SKXy8XSpUvr9VFTUxOTJk2Kbt26RVlZWYwZMybWrVt3MJ8GcID2VQt27twZkydPjoEDB0anTp2iV69e8cUvfjHefvvtOn04JoC0NXRMcMstt0T//v2jU6dO0bVr1xg6dGgsXLiwTh/qAKStoTrwcddcc03kcrn4t3/7tzr3qwOQvoZqwZVXXhm5XK7ObcSIEXX6UAsgbY05Jnj11Vfj4osvjoqKiujUqVN88pOfjDVr1uS3N8f5wiYFHu+9916cffbZUVpaGk899VS88sorcdddd0XXrl3zbe688874/ve/H/fee28sXLgwOnXqFMOHD4+ampp8my984QuxYsWKmD17djzxxBPxv//7vzFhwoQmTRxoGY2pA++//36cc845cccdd+y1n+uvvz4ef/zxmDlzZjz//PPx9ttvxyWXXHIwngLQDBqqBR988EG89NJLceONN8ZLL70Us2bNipUrV8bFF19cpx/HBJCuxhwTHH/88XHPPffE73//+3jhhRfi6KOPjmHDhsU777yTb6MOQLoaUwd2e/TRR+PFF1+MXr161dumDkDaGlsLRowYEWvXrs3fHn744Trb1QJIV2PqwKpVq+Kcc86J/v37x3PPPRfLli2LG2+8MTp06JBv0yznC7MmmDx5cnbOOefsdXttbW1WVVWVfe9738vft2nTpqx9+/bZww8/nGVZlr3yyitZRGS//e1v822eeuqpLJfLZX/605+aMh2gBTRUBz5u9erVWURkL7/8cp37N23alJWWlmYzZ87M3/fqq69mEZEtWLCgWecLFEZTasFuixYtyiIie+utt7Isc0wAqdufOrB58+YsIrI5c+ZkWaYOQOoaWwf++Mc/ZkceeWS2fPnyrG/fvtnUqVPz29QBSF9jasEVV1yRjRo1aq/b1QJIW2PqwOc+97ns8ssv3+v25jpf2KS/8PjVr34VZ5xxRlx66aXRo0ePGDRoUDzwwAP57atXr47q6uoYOnRo/r6Kioo488wzY8GCBRERsWDBgujSpUucccYZ+TZDhw6NNm3a1PvzdqD4NFQHGmPJkiWxc+fOOrWif//+0adPn3ytAIrb/tSCzZs3Ry6Xiy5dukSEYwJIXVPrwI4dO+L++++PioqKOOWUUyJCHYDUNaYO1NbWxrhx4+KGG26Ik046qV4f6gCkr7HHBM8991z06NEjTjjhhJg4cWJs2LAhv00tgLQ1VAdqa2vjySefjOOPPz6GDx8ePXr0iDPPPDMee+yxfJvmOl/YpMDj//7v/+JHP/pR9OvXL55++umYOHFiXHfddTF9+vSIiKiuro6IiMrKyjqPq6yszG+rrq6OHj161NleUlIShx9+eL4NULwaqgONUV1dHe3atcuf9Nzt47UCKG5NrQU1NTUxefLkGDt2bJSXl0eEYwJIXWPrwBNPPBFlZWXRoUOHmDp1asyePTu6d+8eEeoApK4xdeCOO+6IkpKSuO666/bYhzoA6WtMLRgxYkT85Cc/iWeffTbuuOOOeP755+PCCy+MXbt2RYRaAKlrqA6sX78+tm3bFrfffnuMGDEinnnmmRg9enRccskl8fzzz0dE850vLGnKxGtra+OMM86I2267LSIiBg0aFMuXL4977703rrjiiqZ0BSRKHQAimlYLdu7cGZdddllkWRY/+tGPWmK6QAE0tg585jOfiaVLl8a7774bDzzwQFx22WWxcOHCeic1gPQ0VAeWLFkSd999d7z00kuRy+VaeLZAoTTmmODzn/98vv3AgQPj5JNPjr/7u7+L5557Ls4///wWmTfQfBqqA7W1tRERMWrUqLj++usjIuLUU0+N+fPnx7333hvnnntus82lSX/h0bNnzxgwYECd+0488cT8J6lXVVVFRNT75PR169blt1VVVcX69evrbP/oo49i48aN+TZA8WqoDjRGVVVV7NixIzZt2lTn/o/XCqC4NbYW7A473nrrrZg9e3b+rzsiHBNA6hpbBzp16hTHHXdcnHXWWTFt2rQoKSmJadOmRYQ6AKlrqA7Mmzcv1q9fH3369ImSkpIoKSmJt956K/75n/85jj766IhQB6A12J/zBMcee2x079493njjjYhQCyB1DdWB7t27R0lJSYPZQnOcL2xS4HH22WfHypUr69z32muvRd++fSMi4phjjomqqqp49tln89u3bNkSCxcujMGDB0dExODBg2PTpk2xZMmSfJtf//rXUVtbG2eeeWZTpgO0gIbqQGOcfvrpUVpaWqdWrFy5MtasWZOvFUBxa0wt2B12vP766zFnzpzo1q1bnfaOCSBt+3tMUFtbGx9++GFEqAOQuobqwLhx42LZsmWxdOnS/K1Xr15xww03xNNPPx0R6gC0BvtzTPDHP/4xNmzYED179owItQBS11AdaNeuXXzyk5/cZ5tmO1/Y6I83z7Js0aJFWUlJSXbrrbdmr7/+evbTn/40O+yww7IZM2bk29x+++1Zly5dsl/+8pfZsmXLslGjRmXHHHNMtn379nybESNGZIMGDcoWLlyYvfDCC1m/fv2ysWPHNmUqQAtpTB3YsGFD9vLLL2dPPvlkFhHZz372s+zll1/O1q5dm29zzTXXZH369Ml+/etfZ4sXL84GDx6cDR48uCWeErAfGqoFO3bsyC6++OLsqKOOypYuXZqtXbs2f/vwww/z/TgmgHQ1VAe2bduWffOb38wWLFiQvfnmm9nixYuzq666Kmvfvn22fPnyfD/qAKSrMT8b/K2+fftmU6dOrXOfOgBpa6gWbN26NfuXf/mXbMGCBdnq1auzOXPmZKeddlrWr1+/rKamJt+PWgDpaswxwaxZs7LS0tLs/vvvz15//fXs3//937O2bdtm8+bNy7dpjvOFTQo8sizLHn/88ewTn/hE1r59+6x///7Z/fffX2d7bW1tduONN2aVlZVZ+/bts/PPPz9buXJlnTYbNmzIxo4dm5WVlWXl5eXZVVddlW3durWpUwFaSEN14KGHHsoiot7t5ptvzrfZvn17
9pWvfCXr2rVrdthhh2WjR4+uE4gAxW9ftWD16tV7rAMRkc2dOzffzjEBpG1fdWD79u3Z6NGjs169emXt2rXLevbsmV188cXZokWL6vShDkDaGvrZ4G/tKfBQByB9+6oFH3zwQTZs2LDsiCOOyEpLS7O+fftm48ePz6qrq+v0oRZA2hpzTDBt2rTsuOOOyzp06JCdcsop2WOPPVZne3OcL8xlWZY14a9TAAAAAAAAik6TPsMDAAAAAACgGAk8AAAAAACA5Ak8AAAAAACA5Ak8AAAAAACA5Ak8AAAAAACA5Ak8AAAAAACA5Ak8AAAAAACA5Ak8AAAAAACA5Ak8AACAA3bllVfGP/7jP7b0NAAAgENYSUtPAAAAKG65XG6f22+++ea4++67I8uygzQjAACA+gQeAADAPq1duzb/70ceeSRuuummWLlyZf6+srKyKCsra4mpAURkWjEAAAKCSURBVAAA5LmkFQAAsE9VVVX5W0VFReRyuTr3lZWV1buk1XnnnRfXXnttfO1rX4uuXbtGZWVlPPDAA/H+++/HVVddFZ07d47jjjsunnrqqTpjLV++PC688MIoKyuLysrKGDduXLz77rsH+ykDAAAJEngAAAAFMX369OjevXssWrQorr322pg4cWJceuml8alPfSpeeumlGDZsWIwbNy4++OCDiIjYtGlTDBkyJAYNGhSLFy+O//mf/4l169bFZZdd1sLPBAAASIHAAwAAKIhTTjklpkyZEv369YtvfvOb0aFDh+jevXuMHz8++vXrFzfddFNs2LAhli1bFhER99xzTwwaNChuu+226N+/fwwaNCgefPDBmDt3brz22mst/GwAAIBi5zM8AACAgjj55JPz/27btm1069YtBg4cmL+vsrIyIiLWr18fERG/+93vYu7cuXv8PJBVq1bF8ccfX+AZAwAAKRN4AAAABVFaWlrn/7lcrs59uVwuIiJqa2sjImLbtm3x2c9+Nu644456ffXs2bOAMwUAAFoDgQcAAFAUTjvttPjFL34RRx99dJSU+FEFAABoGp/hAQAAFIVJkybFxo0bY+zYsfHb3/42Vq1aFU8//XRcddVVsWvXrpaeHgAAUOQEHgAAQFHo1atX/OY3v4ldu3bFsGHDYuDAgfG1r30tunTpEm3a+NEFAADYt1yWZVlLTwIAAAAAAOBA+DUpAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgef8/bx6E89WIYcEAAAAASUVORK5CYII=", - "text/plain": [ - "" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "diarization" + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nO3de3RV5Zk/8CeQCzEhCRcDwXJTRLygoHQs6kztVAHrEmtntFJEpY5WB7V2quO4vNE6WmlrqaOzdGS0Y50q01bbsVbrXSvC4JVaKwuRCrZKoCYSAkJAsn9/8OPUlEsSck5Odvh81spaZO/3vPvd593nOfucL9m7IEmSJAAAAAAAAFKsR74HAAAAAAAA0FECDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYHHDpx99tlRUFCw3c9bb72103WTJk3KPH7YsGE7bHPjjTdm2rzzzjtx4oknxl577RXV1dVx2WWXxUcffZRZv3LlyvjSl74UI0eOjB49esQll1yy3TgfeOCBGDduXFRVVUVZWVmMGTMm7rnnntw+OSmXlrmNiFizZk3MmDEjampqoqSkJEaOHBkPP/xw7p6cFNs2d+eff/5262bMmBEFBQVx9tlnt2jbVeZ5m7lz50ZBQUF8/vOf7+Cz0b1le64jIubPnx+f+9znok+fPtGrV68YPXp0fO9734stW7a0aFdfXx9Tp06NioqKqKqqinPOOSfWrVuXWb9x48Y4++yzY/To0VFYWLjDuXzmmWd2OKba2tosPDsAAAAAe7bCfGz0g/WbOnV7fcqK2/2YSZMmxQ9+8IMWy/bee++drispKWnx+ze/+c0499xzWyzr3bt3RERs2bIlTjzxxBg4cGDMnz8/Vq5cGWeeeWYUFRXFDTfcEBERTU1Nsffee8dVV10Vs2fP3uEY+/btG1deeWWMGjUqiouL46GHHorp06dHdXV1TJw4sd373FENTQ2dur3Kksrdelwa5nbTpk1x/PHHR3V1dfz0pz+NffbZJ1asWBFVVVW7tc8dtaFhY6dur7SyV7sfM3jw4Jg7d27Mnj07SktLI2LrF9D33ntvDBkypEXbrjLP2yxfvjwuvfTS+Ou//ut273c2bamr69Tt9ezXb7cel825/tnPfhannXZaTJ8+PZ5++umoqqqKJ554Iv75n/85FixYED/+8Y+joKAgIiKmTp0aK1eujMcffzw2b94c06dPj/POOy/uvffeiNh6XJSWlsbFF18c999//y73YcmSJVFRUZH5vbq6ereeCwAAAAD+LC+BxwnffrpTt/d/32j/l/8lJSUxcODAdq/bpnfv3jtt89hjj8Ubb7wRTzzxRAwYMCDGjBkT1113XVx++eUxc+bMKC4ujmHDhsXNN98cERF33XXXDvs59thjW/z+1a9+Ne6+++6YN29eXgKPaY98qVO39+Dnf7lbj0vD3N51111RX18f8+fPj6KioojY+lcH+fLDM3/aqdv7yv+e0e7HHH744bFs2bJ44IEHYurUqRGx9a+ghgwZEsOHD2/RtqvMc8TWL8mnTp0a3/jGN+K5556LNWvWtGe3s6r20DGdur193v3Dbj0uW3O9fv36OPfcc2Py5Mlxxx13ZJb/wz/8QwwYMCAmT54cP/7xj+OLX/xiLF68OH71q1/Fiy++GOPGjYuIiFtuuSU+97nPxXe/+90YNGhQlJWVxW233RYREc8///wu57K6ujpvASYAAABAd+WSVnmwYMGCGD16dAwYMCCzbOLEibF27dr43e9+t1t9JkkSTz75ZCxZsiT+5m/+JltDpZ2yNbcPPvhgjB8/PmbMmBEDBgyIQw45JG644YbtLrFDS1/+8pdb/G/+u+66K6ZPn
5717WTzNfzNb34zqqur45xzzsn2MLu1bMz1Y489FnV1dXHppZdut+6kk06KkSNHxn333RcRW+e8qqoqE3ZERBx33HHRo0ePWLhwYbvHP2bMmKipqYnjjz8+nn/++XY/HgAAAIDtCTx24qGHHory8vLMz6mnnrrTdeXl5ZnL2Gxz+eWXb9fmueeei4iI2traFl+URkTm9/Zex72hoSHKy8ujuLg4TjzxxLjlllvi+OOP351d3mOkYW5///vfx09/+tPYsmVLPPzww3H11VfHTTfdFP/6r/+6u7u9RzjjjDNi3rx5sWLFilixYkU8//zzccYZ2/+1SFeZ53nz5sWdd94Zc+bMae+u7vGyMddvvvlmREQceOCBO9zGqFGjMm1qa2u3u+xUYWFh9O3bt11zXlNTE7fffnvcf//9cf/998fgwYPj2GOPjVdeeaXNfQAAAACwY3m5pFUafOYzn8lcmiQioqysbKfrIrbeT+PjLrvsssyNc7fZZ599sj7O3r17x6JFi2LdunXx5JNPxj/90z/Fvvvuu93lrvizNMxtc3NzVFdXxx133BE9e/aMI444It599934zne+E9dee21Wt9Wd7L333nHiiSfGf/3Xf0WSJHHiiSdG//79t2vXFea5sbExpk2bFnPmzNnhGNm1bM51kiQ5HevHHXDAAXHAAQdkfj/qqKNi2bJlMXv27Ljnnns6bRwAAAAA3VFeAo9H/vkz+dhsu5SVlcWIESPavW6b/v3777TNwIED44UXXmixbNWqVZl17dGjR4/MdsaMGROLFy+Ob33rW3kJPO454d5O3+buSMPc1tTURFFRUfTs2TOz7MADD4za2trYtGlTFBcXt7mvbDjzh3/fqdvriC9/+ctx4YUXRkTEv//7v++wTVeY52XLlsXy5cvjpJNOyixrbm6OiK1/ObBkyZLYb7/92tRXtgx8bVGnbq+jOjrXI0eOjIiIxYsXx1FHHbXd+sWLF8dBBx0UEVvndfXq1S3Wf/TRR1FfX9/uuv2X/uqv/irmzZvXoT4AAAAAyFPg0aesc7+s7WrGjx8f119/faxevTpziZTHH388KioqMl+u7a7m5uZoamrKxjDbrbKkMi/b7UqyNbdHH3103HvvvdHc3Bw9emy98tybb74ZNTU1nR52RESUVvbq9G3urkmTJsWmTZuioKAgJk6cmJNtZGOeR40aFb/97W9bLLvqqquisbExbr755hg8eHDWx92anv36dfo2O6Kjcz1hwoTo27dv3HTTTdsFHg8++GAsXbo0rrvuuojYOudr1qyJl19+OY444oiIiHjqqaeiubk5jjzyyA7tx6JFi6KmpqZDfQAAAADgkla7pampabtrthcWFra4nEpjY+N2bfbaa6+oqKiICRMmxEEHHRTTpk2Lb3/721FbWxtXXXVVzJgxI0pKSjLtFy3a+r+t161bF3/6059i0aJFUVxcnPlC9Vvf+laMGzcu9ttvv2hqaoqHH3447rnnnu0u30LbdZW5veCCC+LWW2+Nr371q3HRRRfF0qVL44YbboiLL744V7vebfTs2TMWL16c+feOdIV57tWrVxxyyCEt+q+qqoqI2G45O9bRuS4rK4v/+I//iNNPPz3OO++8uPDCC6OioiKefPLJuOyyy+Lv//7v47TTTouIrX9hNWnSpDj33HPj9ttvj82bN8eFF14Yp59+egwaNCjT9xtvvBGbNm2K+vr6aGxszBwDY8aMiYiI73//+zF8+PA4+OCDY+PGjfGf//mf8dRTT8Vjjz2W9ecHAAAAYI+TsJ2zzjorOfnkk3e6LiK2+znggAMybYYOHbrDNl/5ylcybZYvX56ccMIJSWlpadK/f//k61//erJ58+YW29pRH0OHDs2sv/LKK5MRI0YkvXr1Svr06ZOMHz8+mTt3bnafjG4mLXObJEkyf/785Mgjj0xKSkqSfffdN7n++uuTjz76KHtPRjeyq3lNkiQ5+eSTk7POOivTtivNc3v2g+zPdZIkya9//etk4sSJSUVFRVJcXJwcfPDByXe/+93tXm91dXXJlClTkvLy8qSioiKZPn160tjY2KLNzo6dbWbNmpXst99+Sa9evZK+ffsmxx57bPLUU0918FkBAAAAIEmSpCBJOvFurQAAAAAAADnQI98DAAAAAAAA6CiBBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKlXmKuOm5ub47333ovevXtHQUFBrjYDAAAAAACkQJIk0djYGIMGDYoePbL/9xg5Czzee++9GDx4cK66BwAAAAAAUugPf/hDfOITn8h6vzkLPHr37h0RWwdeUVGRq80AAAAAAAApsHbt2hg8eHAmP8i2nAUe2y5jVVFRIfAAAAAAAAAiInJ2Gww3LQcAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6OQ881td/mOtNQLf0x5Xvxnd/clP8ceW7+R4K3Uz9xvq4d/GPon5jfURsrdMv3febrNbrXPSZZltWrYq1N30vtqxale+hZE132KdsHadpOt7/8vXf0XaQbbk49j7e5/uNTTHn6bfi/camrPXPnq2zjil1GSC3ukud7S77EdG99oXOlfPAY8OajbneBHRLtXW18euip6K2rjbfQ6Gb+WBjfcxdcm988P9PGj78YEO8PPe38eEHG7K2jVz0mWZbVq+Oxu/Nji2rV+d7KFnTHfYpW8dpmo73v3z9d7QdZFsujr2P9/l+Y1Pc+cwygQdZ01nHlLoMkFvdpc52l/2I6F77QudySSsAAAAAACD1BB4AAAAAAEDqFeZ6A03rN8WGBpe1gvba9OHmiIhY/9H6aGhqyPNo6E7WbVq3w+VN67JXr5vWbcpKP91N85qG2FJXl+9hZEXzmu5Tlzp67KfxeF+3ad0u31t2Viegs7R2jLa3r7/UuGFzfLA+fa9dup7GDZs7dXvZfG0A8Gfd7fy3O7xfdLc5ofPkPPB49Ppno7SoNNebgW6noV99xOSIm5bPilie79GwJ/jlNU/mewjdXt3pU/I9BHZgTzz2r55/Zb6HALuU62P0oh++lNP+IVfUbwDawvsFezKXtAIAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEi9nN/DY+KVn45hhwzJ9Wag23l1yaKY996j8fVhl8eYAw/L93DoRpY3vL3D63me+M3PRr9hfbKyjbrlH+yR90VoTb+590XRQQfmexhZsfmNxd3mniQdPfbTeLxfd9T1Maxy
+E7X76xOQGdp7Rhtjx0dz7ecOS5GDOydlf7Zs71V29ip94TJ5msDgD/rbue/3eH9orvNCZ0n54FHSVlxlFb2yvVmoNsp3qsoIiLKCsuisqQyz6OhOykvLt/h8pLy7NXrkvLirPTT3fSoqoye/frlexhZsaWq+9Sljh77aTzey4vLd/nesrM6AZ2ltWO0vX39pd6lRdGnLH2vXbqe3qVFnbq9bL42APiz7nb+2x3eL7rbnNB5XNIKAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6vWcOXPmzFx03NTUFDfeeGP8y7/8S/SucpMZaK/mLUk0Ll8fnxr1qajoXZHv4dDN9OpZGqP3PjRKC0sjIqKotDAGHTIgirN4481c9JlmBWVlUTJ+fPQo7z7vid1hn7J1nKbpeP/L139H20G25eLY+3ifpcU944jhfWOvksKs9c+erbOOKXUZILe6S53tLvsR0b32hT/blhtcccUVUVJSkvX+C5IkSbLea0SsXbs2Kisro6GhISoqfFkLAAAAAAB7slznBi5pBQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8uoH6jfVx7+IfRf3G+i7RT67taJwdHfv7jU0x5+m34v3Gpqy0y5Zcba+z94PcM6fp1hXmryuMAei61AjypS3n+bvzWWDLqlWx9qbvxZZVq3ZrXB19PLBVe95fvBd1nnw91+Y4+zyndEV1OT4eBR7dwAcb62Puknvjgw4GFdnqJ9d2NM6Ojv39xqa485llbQo82tIuW3K1vc7eD3LPnKZbV5i/rjAGoOtSI8iXtpzn785ngS2rV0fj92bHltWrd2tcHX08sFV73l+8F3WefD3X5jj7PKd0RXXrBB4AAAAAAAC7JPAAAAAAAABST+ABAAAAAACkXmG+B0D2rNu0LhqaGjr0+DT5+P5ma+yNGzbHB+s37XJ9PrQ2rt3pj+4p28cKnaMrvSYdQ8COdKU6xZ5pV591OvJZoHlNQ2ypq9utxwHZ05ZzUO9Fna+zPxuY49zxOY+upHHDRzntX+DRjVw9/8p8D6FT5WJ/L/rhS1nvMxu66rjoehwrdJRjCICuKFefdepOn5KTfoH2cQ7aNZmX7sNc0pV81LQ+p/27pBUAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDquYdHN3LdUdfHsMrhu/345Q1vp+o+IB/f32yN/ZYzx8WIgb13uv6t2sa8XPewtXG1V772g9zL9rFC5+hKr0nHELAjXalOsWfa1WedjnwW6Df3vig66MB2P27zG4vd/wOyqC3noN6LOl9nfzYwx7njcx5dyaKl78Xfzspd/wKPbqS8uDwqSyo79Pg0+fj+ZmvsvUuLok9Z8S7X50Nr49qd/uiesn2s0Dm60mvSMQTsSFeqU+yZdvVZpyOfBXpUVUbPfv3a/bgtVbv/uQvYXlvOQb0Xdb7O/mxgjnPH5zy6kt6luY0kXNIKAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9XrOnDlzZi46bmpqihtvvDGuuOKKKCkpycUm+JhePUtj9N6HRmlhaZfoJ9d2NM6Ojr20uGccMbxv7FWy6+vItbVdtuRqe529H+SeOU23rjB/XWEMQNelRpAvbTnP353PAgVlZVEyfnz0KN+9e4B09PHAVu15f/Fe1Hny9Vyb4+zznNLVNDU1xc3f+07OcoOCJEmSrPcaEWvXro3KyspoaGiIioqKXGwCAAAAAABIiVznBi5pBQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN47CHeb2yKOU+/Fe83NnWoDQDs6eo31se9i38U9Rvrd7kMAAAA6FwCjz3E+41Nceczy1oNPFprAwB7ug821sfcJffGBx8LN3a0DAAAAOhcAg8AAAAAACD1BB4AAAAAAEDqFeZ7AHSuxg2b44P1m3a6DgBom3Wb1kVDU0Pm3wAAAEB+CTz2MBf98KV8DwEAuoWr51+Z7yEAAAAAH+OSVgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKnnHh57mFvOHBcjBvbe4bq3ahvd4wMA2ui6o66PYZXDIyJiecPb7ukBAAAAeSbw2MP0Li2KPmXFO10HALRNeXF5VJZUZv4NAAAA5JdLWgEAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gcceon/vkjjn2P2if++SDrUBgD1dn1594/QDvhR9evXd5TIAAACgcxUkSZLkouO1a9dGZWVlNDQ0REVFRS42AQAAAAAApESucwN/4QEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1CnPVcZIkERGxdu3aXG0CAAAAAABIiW15wbb8INtyFnjU1dVFRMTgwYNztQkAAAAAACBl6urqorKyMuv95izw6Nu3b0REvPPOOzkZOJAOa9eujcGDB8cf/vCHqKioyPdwgDxQBwB1AIhQCwB1AIhoaGiIIUOGZPKDbMtZ4NGjx9bbg1RWVipgQFRUVKgFsIdTBwB1AIhQCwB1APhzfpD1fnPSKwAAAAAAQCcSeAAAAAAAAKnXc+bMmTNz1nnPnnHsscdGYWHOrpwFpIBaAKgDgDoARKgFgDoA5LYOFCRJkmS9VwAAAAAAgE7kklYAAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABSr92Bx7vvvhtnnHFG9OvXL0pLS2P06NHx0ksvZdYnSRLXXHNN1NTURGlpaRx33HGxdOnSFn3U19fH1KlTo6KiIqqqquKcc86JdevWdXxvgE7RWh144IEHYsKECdG
vX78oKCiIRYsWbdfHxo0bY8aMGdGvX78oLy+Pv/u7v4tVq1Z15m4AHbSrWrB58+a4/PLLY/To0VFWVhaDBg2KM888M957770WfTgngHRr7Zxg5syZMWrUqCgrK4s+ffrEcccdFwsXLmzRhzoA6dZaHfi4888/PwoKCuL73/9+i+XqAKRfa7Xg7LPPjoKCghY/kyZNatGHWgDp1pZzgsWLF8fkyZOjsrIyysrK4pOf/GS88847mfXZ+L6wXYHHBx98EEcffXQUFRXFI488Em+88UbcdNNN0adPn0ybb3/72/Fv//Zvcfvtt8fChQujrKwsJk6cGBs3bsy0mTp1avzud7+Lxx9/PB566KH49a9/Heedd167Bg7kR1vqwPr16+OYY46JWbNm7bSfr33ta/GLX/wifvKTn8Szzz4b7733XnzhC1/ojF0AsqC1WvDhhx/GK6+8EldffXW88sor8cADD8SSJUti8uTJLfpxTgDp1ZZzgpEjR8att94av/3tb2PevHkxbNiwmDBhQvzpT3/KtFEHIL3aUge2+dnPfhb/93//F4MGDdpunToA6dbWWjBp0qRYuXJl5ue+++5rsV4tgPRqSx1YtmxZHHPMMTFq1Kh45pln4rXXXourr746evXqlWmTle8Lk3a4/PLLk2OOOWan65ubm5OBAwcm3/nOdzLL1qxZk5SUlCT33XdfkiRJ8sYbbyQRkbz44ouZNo888khSUFCQvPvuu+0ZDpAHrdWBj3v77beTiEheffXVFsvXrFmTFBUVJT/5yU8yyxYvXpxERLJgwYKsjhfIjfbUgm1eeOGFJCKSFStWJEninADSbnfqQENDQxIRyRNPPJEkiToAadfWOvDHP/4x2WeffZLXX389GTp0aDJ79uzMOnUA0q8tteCss85KTj755J2uVwsg3dpSB774xS8mZ5xxxk7XZ+v7wnb9hceDDz4Y48aNi1NPPTWqq6tj7NixMWfOnMz6t99+O2pra+O4447LLKusrIwjjzwyFixYEBERCxYsiKqqqhg3blymzXHHHRc9evTY7s/bga6ntTrQFi+//HJs3ry5Ra0YNWpUDBkyJFMrgK5td2pBQ0NDFBQURFVVVUQ4J4C0a28d2LRpU9xxxx1RWVkZhx12WESoA5B2bakDzc3NMW3atLjsssvi4IMP3q4PdQDSr63nBM8880xUV1fHAQccEBdccEHU1dVl1qkFkG6t1YHm5ub45S9/GSNHjoyJEydGdXV1HHnkkfHzn/880yZb3xe2K/D4/e9/H7fddlvsv//+8eijj8YFF1wQF198cdx9990REVFbWxsREQMGDGjxuAEDBmTW1dbWRnV1dYv1hYWF0bdv30wboOtqrQ60RW1tbRQXF2e+9Nzm47UC6NraWws2btwYl19+eUyZMiUqKioiwjkBpF1b68BDDz0U5eXl0atXr5g9e3Y8/vjj0b9//4hQByDt2lIHZs2aFYWFhXHxxRfvsA91ANKvLbVg0qRJ8cMf/jCefPLJmDVrVjz77LNxwgknxJYtWyJCLYC0a60OrF69OtatWxc33nhjTJo0KR577LE45ZRT4gtf+EI8++yzEZG97wsL2zPw5ubmGDduXNxwww0RETF27Nh4/fXX4/bbb4+zzjqrPV0BKaUOABHtqwWbN2+O0047LZIkidtuuy0fwwVyoK114DOf+UwsWrQo3n///ZgzZ06cdtppsXDhwu2+1ADSp7U68PLLL8fNN98cr7zyShQUFOR5tECutOWc4PTTT8+0Hz16dBx66KGx3377xTPPPBOf/exn8zJuIHtaqwPNzc0REXHyySfH1772tYiIGDNmTMyfPz9uv/32+PSnP521sbTrLzxqamrioIMOarHswAMPzNxJfeDAgRER2905fdWqVZl1AwcOjNWrV7dY/9FHH0V9fX2mDdB1tVYH2mLgwIGxadOmWLNmTYvlH68VQNfW1lqwLexYsWJFPP7445vo3U4AAAbjSURBVJm/7ohwTgBp19Y6UFZWFiNGjIhPfepTceedd0ZhYWHceeedEaEOQNq1Vgeee+65WL16dQwZMiQKCwujsLAwVqxYEV//+tdj2LBhEaEOQHewO98T7LvvvtG/f/946623IkItgLRrrQ70798/CgsLW80WsvF9YbsCj6OPPjqWLFnSYtmbb74ZQ4cOjYiI4cOHx8CBA+PJJ5/MrF+7dm0sXLgwxo8fHxER48ePjzVr1sTLL7+cafPUU09Fc3NzHHnkke0ZDpAHrdWBtjjiiCOiqKioRa1YsmRJvPPOO5laAXRtbakF28KOpUuXxhNPPBH9+vVr0d45AaTb7p4TNDc3R1NTU0SoA5B2rdWBadOmxWuvvRaLFi3K/AwaNCguu+yyePTRRyNCHYDuYHfOCf74xz9GXV1d1NTURIRaAGnXWh0oLi6OT37yk7tsk7XvC9t8e/MkSV544YWksLAwuf7665OlS5cmP/rRj5K99tor+e///u9MmxtvvDGpqqpK/vd//zd57bXXkpNPPjkZPnx4smHDhkybSZMmJWPHjk0WLlyYzJs3L9l///2TKVOmtGcoQJ60pQ7U1dUlr776avLLX/4yiYhk7ty5yauvvpqsXLky0+b8889PhgwZkjz11FPJSy+9lIwfPz4ZP358PnYJ2A2t1YJNmzYlkydPTj7xiU8kixYtSlauXJn5aWpqyvTjnADSq7U6sG7duuSKK65IFixYkCxfvjx56aWXkunTpyclJSXJ66+/nulHHYD0astng780dOjQZPbs2S2WqQOQbq3VgsbGxuTSSy9NFixYkLz99tvJE088kRx++OHJ/vvvn2zcuDHTj1oA6dWWc4IHHnggKSoqSu64445k6dKlyS233JL07Nkzee655zJtsvF9YbsCjyRJkl/84hfJIYcckpSUlCSjRo1K7rjjjhbrm5ubk6uvvjoZMGBAUlJSknz2s59NlixZ0qJNXV1dMmXKlKS8vDypqKhIpk+fnjQ2NrZ3KECetFYHfvCDHyQRsd3Ptddem2mzYcOG5B//8R+TPn36JHvttVdyyimntAhEgK5vV7Xg7bff3mEdiIjk6aefzrRzTgDptqs6sGHDhuSUU05JBg0alBQXFyc1NTXJ5MmTkxdeeKFFH+oApFtrnw3+0o4CD3UA0m9XteDDDz9MJkyYkOy9995JUVFRMnTo0OTcc89NamtrW/ShFkC6teWc4M4770xGjBiR9OrVKznssMOSn//85y3WZ+P7woIkSZJ2/HUKAAAAAABAl9Oue3gAAAAAAAB0RQIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAA6LCzzz47Pv/5z+d7GAAAwB6sMN8DAAAAuraCgoJdrr/22mvj5ptvjiRJOmlEAAAA2xN4AAAAu7Ry5crMv//nf/4nrrnmmliyZElmWXl5eZSXl+djaAAAABkuaQUAAOzSwIEDMz+VlZVRUFDQYll5efl2l7Q69thj46KLLopLLrkk+vTpEwMGDIg5c+bE+vXrY/
r06dG7d+8YMWJEPPLIIy229frrr8cJJ5wQ5eXlMWDAgJg2bVq8//77nb3LAABACgk8AACAnLj77rujf//+8cILL8RFF10UF1xwQZx66qlx1FFHxSuvvBITJkyIadOmxYcffhgREWvWrIm//du/jbFjx8ZLL70Uv/rVr2LVqlVx2mmn5XlPAACANBB4AAAAOXHYYYfFVVddFfvvv39cccUV0atXr+jfv3+ce+65sf/++8c111wTdXV18dprr0VExK233hpjx46NG264IUaNGhVjx46Nu+66K55++ul4880387w3AABAV+ceHgAAQE4ceuihmX/37Nkz+vXrF6NHj84sGzBgQERErF69OiIifvOb38TTTz+9w/uBLFu2LEaOHJnjEQMAAGkm8AAAAHKiqKioxe8FBQUtlhUUFERERHNzc0RErFu3Lk466aSYNWvWdn3V1NTkcKQAAEB3IPAAAAC6hMMPPzzuv//+GDZsWBQW+qgCAAC0j3t4AAAAXcKMGTOivr4+pkyZEi+++GIsW7YsHn300Zg+fXps2bIl38MDAAC6OIEHAADQJQwaNCief/752LJlS0yYMCFGjx4dl1xySVRVVUWPHj66AAAAu1aQJEmS70EAAAAAAAB0hP8mBQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1/h//ffiQt3ubxgAAAABJRU5ErkJggg==", + "text/plain": [ + "" ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "groundtruth" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MxlrTbyPYnqB" + }, + "source": [ + "# Going further \n", + "\n", + "We have only scratched the surface in this introduction. \n", + "\n", + "More details can be found in the [`pyannote.audio` Github repository](https://github.com/pyannote/pyannote-audio).\n" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "include_colab_link": true, + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "0125df9fa8e14b3db0e2bce299529812": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_407e250e244b4985b1ce8c9d32a8af7d", + "max": 318, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_8127c4258e374ad986ce1f8b4c70f704", + "value": 318 + } }, - { - "cell_type": "markdown", - "metadata": { - "id": "DLhErS6wYnqB" - }, - "source": [ - "# Evaluation with `pyannote.metrics`\n", - "\n", - "Because groundtruth is available, we can evaluate the quality of the diarization pipeline by computing the [diarization error rate](http://pyannote.github.io/pyannote-metrics/reference.html#diarization)." 
- ] + "0821b47ae70444dfa38b84719c4836a6": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "vNHQRTUIYnqB", - "vscode": { - "languageId": "python" - } - }, - "outputs": [], - "source": [ - "from pyannote.metrics.diarization import DiarizationErrorRate\n", - "metric = DiarizationErrorRate()\n", - "der = metric(groundtruth, diarization)" - ] + "0adb304bf90f4079a4031caea1cfb924": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "9d0vKQ0fYnqB", - "outputId": "9a664753-cd84-4211-9153-d33e929bb252", - "vscode": { - "languageId": "python" - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "diarization error rate = 19.2%\n" - ] - } + "0b4bf8076fdf4d19843a3246c8bd61ac": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "0d10fb0edc9144b1a1fc1f2c9e322410": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_d33fba0d78fb41f983c55f5cd2a0a740", + "placeholder": "​", + "style": "IPY_MODEL_fd47487fc8734594823f8afa00c4239d", + "value": "Downloading: 100%" + } + }, + 
"0d80273cabbc42ba9a408fb1144151c9": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "0e382d66f09f4958a40baa7ab83c4ccb": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "13525aa369a9410a83343952ab511f3c": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "1600b9cd09c446e581b7912e35c9f56e": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "16c0017f65b649f5ac5bebf1c955a1fd": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, 
+ "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "17856a72e4e948039a66c51e8244cb50": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "183c55d5d3ce4058ae338c81344547c5": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_70efa83bf3ea45b4bd8cc41f57613328", + "IPY_MODEL_338747810ac74b4e83e356a01459c8a5", + "IPY_MODEL_ac0bcfa1ef6e4e78a7769c4cb2e8762f" ], - "source": [ - "print(f'diarization error rate = {100 * der:.1f}%')" - ] + "layout": "IPY_MODEL_6efb7939bb954dc8ba116680139eb257" + } }, - { - "cell_type": "markdown", - "metadata": { - "id": "Xz5QJV9nYnqB" - }, - "source": [ - "This implementation of diarization error rate is brought to you by [`pyannote.metrics`](http://pyannote.github.io/pyannote-metrics/).\n", - "\n", - "It can also be used to improve visualization by find the optimal one-to-one mapping between groundtruth and hypothesized speakers." 
- ] + "1946386483ed4947a2184cdb4ea6e434": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 230 - }, - "id": "xMLf4mrYYnqB", - "outputId": "ed08bcc8-24c6-439c-a244-3a673ff480b0", - "vscode": { - "languageId": "python" - } - }, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nO3de5AV5Z038N/AXMAZZkBwBjBcjILoiooh5RLdjUYDWFS8ZF9NjKISy42E9baJL2tF0JiViJbFurqlJaWuMUG34rKJQY1Ro64ogaASNbKIBDAqIxFkuA/K9PuHL2czAWRmOIdznuHzqTpVTPczTz99+vSve/rL6S7LsiwLAAAAAACAhHUp9gAAAAAAAAD2lsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsBjFy666KIoKyvb6fXWW2/tdt7YsWNzvz948OBdtrnppptybd5+++0YN25cHHDAAVFfXx9XX311fPzxx7n5q1atim984xsxdOjQ6NKlS1x55ZU7jXP27NkxcuTI6NmzZ1RXV8exxx4bDzzwQGHfnMSlsm0jItatWxeTJk2Kfv36RVVVVQwdOjQee+yxwr05Cdux7S699NKd5k2aNCnKysrioosuatW2VLbzDg899FCUlZXFmWeeuZfvBgAAAADsn8qLsdAPN23bp8vrVV3Z7t8ZO3Zs3Hfffa2mHXTQQbudV1VV1ernG264IS655JJW03r06BEREdu3b49x48ZF375948UXX4xVq1bFBRdcEBUVFTFt2rSIiGhubo6DDjoorr322pgxY8Yux3jggQfG9773vRg2bFhUVlbGnDlzYsKECVFfXx9jxoxp9zrvrabmpn26vLqqug79Xgrbdtu2bfHlL3856uvr4+GHH46DDz44Vq5cGT179uzQOu+tLU1b9+nyutd1a/fvDBgwIB566KGYMWNGdO/ePSIitm7dGrNmzYqBAwe2alsq23mHFStWxHe/+934m7/5m3avNwAAAADwiaIEHqfd/Mw+Xd5vvt/+i/9VVVXRt2/fds/boUePHrtt86tf/SreeOONeOqpp6KhoSGOPfbY+MEPfhCTJ0+O66+/PiorK2Pw4MFx2223RUTEvffeu8t+TjrppFY/X3HFFXH//ffH3LlzixJ4jH/8G/t0eY+c+WiHfi+FbXvvvffG2rVr48UXX4yKioqI+ORbB8Xyowse3qfL+9bPz2/37xx33HGxbNmymD17dpx33nkR8cm3oAYOHBiHHHJIq7alsp0jPglPzjvvvPj+978fzz//fKxbt649qw0AAAAA/H9uaVUE8+bNi+HDh0dDQ0Nu2pgxY2L9+vXx+9//vkN9ZlkWTz/9dCxZsiT+9m//Nl9DpZ3ytW0feeSRGDVqVEyaNCkaGhriqKOOimnTpsX27dsLMexO45vf/Garb27ce++9MWHChLwvJ5/78A033BD19fVx8cUX53uYAAAAALBfEXjsxpw5c6Kmpib3Ovvss3c7r6amJncbmx0mT568U5vnn38+IiIaGxtbXSiNiNzPjY2N7RpnU1NT1NTURGVlZYwbNy5uv/32+PKXv9yRVd5vpLBt//CHP8TDDz8c27dvj8ceeyymTJkSt956a/zzP/9zR1d7v3D++efH3LlzY+XKlbFy5cp44YUX4vzzd/62SKls57lz58Y999wTM2fObO+qAgAAAAB/oSi3tErBySefHHfeeWfu5+rq6t3Oi/jkeRp/7uqrr849JHmHgw8+OO/j7NGjRyxatCg2btwYTz/9dPzjP/5jfPazn93pdlf8rxS2bUtLS9TX18fdd98dXbt2jc997nPx7rvvxi233BLXXXddXpfVmRx00EExbty4+Pd///fIsizGjRsXffr02aldKWznDRs2xPjx42PmzJm7HCMAAAAA0D5FCTwe/78nF2Ox7VJdXR2HHXZYu+ft0KdPn9226du3byxYsKDVtPfffz83rz26dOmSW86xxx4bixcvjh/+8IdFCTweOG3WPl9mR6Swbfv16xcVFRXRtWvX3LQjjjgiGhsbY9u2bVFZWdnmvvLhgh/9n326vL3xzW9+M/7hH/4hIiL+7d/+bZdtSmE7L1u2LFasWBFf+cpXctNaWloiIqK8vDyWLFkShx56aJv6AgAAAACKFHj0qt63F2tLzahRo+LGG2+M1atXR319fUREPPnkk1FbWxtHHnnkXvXd0tISzc3N+Rhmu9VV1RVluaUkX9v2hBNOiFmzZkVLS0t06fLJnefefPPN6Nev3z4POyIiutd12+fL7KixY8fGtm3boqysLMaMGVOQZeRjOw8bNixee+21VtOuvfba2LBhQ9x2220xYMCAvI8bAAAAADozt7TqgObm5p3u019eXt7qtjQbNmzYqc0BBxwQtbW1MXr06DjyyCNj/PjxcfPNN0djY2Nce+21MWnSpKiqqsq1X7RoUUREbNy4Mf70pz/FokWLorKyMndB9Yc//GGMHDkyDj300Ghubo7HHnssHnjggZ1u1UPblcq2nThxYtxxxx1xxRVXxGWXXRZLly6NadOmxeWXX16oVe80unbtGosXL879e1dKYTt369Ytjjrqq
Fb99+zZMyJip+kAAAAAwJ4JPDrgl7/8ZfTr16/VtMMPPzz+53/+J/fz1KlTY+rUqa3afOtb34q77rorunbtGnPmzImJEyfGqFGjorq6Oi688MK44YYbWrUfMWJE7t8vvfRSzJo1KwYNGhQrVqyIiIhNmzbFt7/97XjnnXeie/fuMWzYsPjxj38cX/va1/K8xvuPUtm2AwYMiCeeeCKuuuqqOProo+Pggw+OK664IiZPnpznNe6camtrP3V+qWxnAAAAACB/yrIsy4o9CAAAAAAAgL3RpdgDAAAAAAAA2FsCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHnlheq4paUl3nvvvejRo0eUlZUVajEAAAAAAEACsiyLDRs2RP/+/aNLl/x/H6Nggcd7770XAwYMKFT3AAAAAABAgv74xz/GZz7zmbz3W7DAo0ePHhHxycBra2sLtRgAAAAAACAB69evjwEDBuTyg3wrWOCx4zZWtbW1Ag8AAAAAACAiomCPwfDQcgAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkFDzw2rd1c6EVAydi0dnMsfPB3e/W5X7t1bcxa/JNYu3VtHkcGHeczSWfUlnqdj5peSjat3RxP3vdMTH/wlnhn1bu7bWefp1hK/bNX6uPrjPbFe17q27XUxwewvynVulyq49pbnXW9KKyCBx5b1m0t9CKgZGz+cEu89NBrsfnDLR3u48Ota+OhJbPiQ8WcEuEzSWfUlnqdj5peSjZ/uCVeef61eKH7s9G4pnG37ezzFEupf/ZKfXyd0b54z0t9u5b6+AD2N6Val0t1XHurs64XheWWVgAAAAAAQPIEHgAAAAAAQPIEHgAAAAAAQPLKC72A5k3bYkuT53iwf2jeuC1vfW3ctjGampvy1h901MZtG4s9BCiY5o27P0/JZ00vNZs+3rTbY4x9nmIr1XMg+0bxFPIzkcp2LdX9AmB/U+rHjc52vCj195vSVPDA44kbn4vuFd0LvRjodKa8+L1iDwGg03t06tPFHkJR3LpiesSKYo8Cds05EH/JZ8J7AEDbOF6AW1oBAAAAAACdgMADAAAAAABInsADAAAAAABIXsGf4THme1+MwUcNLPRioCSsWfFh3u4H/4Mv3BiD6w7JS1+wN1Y0LXcfUDqtcTecEr0H99rlvHzW9FLzncGT49gjjtnlPPs8xVaq50D2jeIp5Gcile1aqvsFwP6m1I8bne14UervN6Wp4IFHVXVldK/rVujFQEmoqqnMW181lTVRV1WXt/6go2oqa4o9BCiYqprdn6fks6aXmury6t0eY+zzFFupngPZN4qnkJ+JVLZrqe4XAPubUj9udLbjRam/35Qmt7QCAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACS1/X666+/vhAdNzc3x0033RT/9E//FD16esAM+4+K7uXR/6iGqOxe0eE+unXtHsMPOjq6l3fP48ig43wm6YzaUq/zUdNLycdbPoqKqIwvHDkqanvU7radfZ5iKfXPXqmPrzPaF+95qW/XUh8fwP6mVOtyqY5rb3XW9dqf7cgNrrnmmqiqqsp7/2VZlmV57zUi1q9fH3V1ddHU1BS1tbv/gxoAAAAAAOj8Cp0buKUVAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIFHotZuXRuzFv8k1m5d2ymWA3TcBxuaY+Yzb8UHG5qLPRQAOrHUjzepj5/S5HMFAB3jGLr/WlPgbS7wSNSHW9fGQ0tmxYcFDiL21XKAjvtgQ3Pc8+wyJwkAFFTqx5vUx09p8rkCgI5xDN1/rdko8AAAAAAAAPhUAg8AAAAAACB55cUeAHtn47aN0dTcVND+gTRs2PJRfLhpW7GHAUAntWHLR8UeQl44XpJPnWW/AIBicW62/9mw5eOC9i/wSNyUF79X7CEAJeKyHy0s9hAAoOQ5XgIAlA7nZvufj5s3FbR/t7QCAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACS5xkeifvBF26MwXWHFKz/FU3LPScEEnH7BSPjsL49ij0MADqptxo3dIp7LDtekk+dZb8AgGJxbrb/WbT0vfjS9ML1L/BIXE1lTdRV1RW0fyANPbpXRK/qymIPA4BOqkf3imIPIS8cL8mnzrJfAECxODfb//ToXthIwi2tAAAAAACA5Ak8AAAAAACA5Ak8AAAAAACA5Ak8AAAAAACA5Ak8EtWr24Hx9cO/Eb26HdgplgN0XJ8eVXHxSYdGnx5VxR4KAJ1Y6seb1MdPafK5AoCOcQzdf/WuKew2L8uyLCtEx+vXr4+6urpoamqK2traQiwCAAAAAABIRKFzA9/wAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifw
AAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkldeqI6zLIuIiPXr1xdqEQAAAAAAQCJ25AU78oN8K1jgsWbNmoiIGDBgQKEWAQAAAAAAJGbNmjVRV1eX934LFngceOCBERHx9ttvF2TgQBrWr18fAwYMiD/+8Y9RW1tb7OEARaAOAOoAEKEWAOoAENHU1BQDBw7M5Qf5VrDAo0uXTx4PUldXp4ABUVtbqxbAfk4dANQBIEItANQB4H/zg7z3W5BeAQAAAAAA9iGBBwAAAAAAkLyu119//fUF67xr1zjppJOivLxgd84CEqAWAOoAoA4AEWoBoA4Aha0DZVmWZXnvFQAAAAAAYB9ySysAAAAAACB5Ag8AAAAAACB5Ag8AAAAAACB5Ag8AAAAAACB57Q483n333Tj//POjd+/e0b179xg+fHgsXLgwNz/Lspg6dWr069cvunfvHqeeemosXbq0VR9r166N8847L2pra6Nnz55x8cUXx8aNG/d+bYB9Yk91YPbs2TF69Ojo3bt3lJWVxaJFi3bqY+vWrTFp0qTo3bt31NTUxN/93d/F+++/vy9XA9hLn1YLPvroo5g8eXIMHz48qquro3///nHBBRfEe++916oP5wSQtj2dE1x//fUxbNiwqK6ujl69esWpp54a8+fPb9WHOgBp21Md+HOXXnpplJWVxb/8y7+0mq4OQPr2VAsuuuiiKCsra/UaO3Zsqz7UAkhbW84JFi9eHKeffnrU1dVFdXV1fP7zn4+33347Nz8f1wvbFXh8+OGHccIJJ0RFRUU8/vjj8cYbb8Stt94avXr1yrW5+eab41//9V/jrrvuivnz50d1dXWMGTMmtm7dmmtz3nnnxe9///t48sknY86cOfHf//3f8fd///ftGjhQHG2pA5s2bYoTTzwxpk+fvtt+rrrqqvjFL34RP/3pT+O5556L9957L7761a/ui1UA8mBPtWDz5s3x8ssvx5QpU+Lll1+O2bNnx5IlS+L0009v1Y9zAkhXW84Jhg4dGnfccUe89tprMXfu3Bg8eHCMHj06/vSnP+XaqAOQrrbUgR3+67/+K37zm99E//79d5qnDkDa2loLxo4dG6tWrcq9HnzwwVbz1QJIV1vqwLJly+LEE0+MYcOGxbPPPhuvvvpqTJkyJbp165Zrk5frhVk7TJ48OTvxxBN3O7+lpSXr27dvdsstt+SmrVu3LquqqsoefPDBLMuy7I033sgiIvvtb3+ba/P4449nZWVl2bvvvtue4QBFsKc68OeWL1+eRUT2yiuvtJq+bt26rKKiIvvpT3+am7Z48eIsIrJ58+bldbxAYbSnFuywYMGCLCKylStXZlnmnABS15E60NTUlEVE9tRTT2VZpg5A6tpaB955553s4IMPzl5//fVs0KBB2YwZM3Lz1AFIX1tqwYUXXpidccYZu52vFkDa2lIHvva1r2Xnn3/+bufn63phu77h8cgjj8TIkSPj7LPPjvr6+hgxYkTMnDkzN3/58uXR2NgYp556am5aXV1dHH/88TFv3ryIiJg3b1707NkzRo4cmWtz6qmnRpcuXXb6ejtQevZUB9ripZdeio8++qhVrRg2bFgMHDgwVyuA0taRWtDU1BRlZWXRs2fPiHBOAKlrbx3Ytm1b3H333VFXVxfHHHNMRKgDkLq21IGWlpYYP358XH311fFXf/VXO/WhDkD62npO8Oyzz0Z9fX0cfvjhMXHixFizZk1unloAadtTHWhpaYlHH300hg4dGmPGjIn6+vo4/vjj42c/+1muTb6uF7Yr8PjDH/4Qd955ZwwZMiSeeOKJmDhxYlx++eVx//33R0REY2NjREQ0NDS0+r2GhobcvMbGxqivr281v7y8PA488MBcG6B07akOtEVjY2NUVlbmLnru8Oe1Aiht7a0FW7dujcmTJ8e5554btbW1EeGcAFLX1jowZ86cqKmpiW7dusWMGTPiySefjD59+kSEOgCpa0sdmD59epSXl8fll1++yz7UAUhfW2rB2LFj40c/+lE8/fTTMX369HjuuefitNNOi+3bt0eEWgCp21MdWL16dWzcuDFuuummGDt2bPzqV7+Ks846K7761a/Gc889FxH5u15Y3p6Bt7S0xMiRI2PatGkRETFixIh4/fXX46677ooLL7ywPV0BiVIHgIj21YKPPvoozjnnnMiyLO68885iDBcogLbWgZNPPjkWLVoUH3zwQcycOTPOOeecmD9//k4XNYD07KkOvPTSS3HbbbfFyy+/HGVlZUUeLVAobTkn+PrXv55rP3z48Dj66KPj0EMPjWeffTZOOeWUoowbyJ891YGWlpaIiDjjjDPiqquuioiIY489Nl588cW466674otf/GLextKub3j069cvjjzyyFbTjjjiiNyT1Pv27RsRsdOT099///3cvL59+8bq1atbzf/4449j7dq1uTZA6dpTHWiLvn37xrZt22LdunWtpv95rQBKW1trwY6wY+XKlfHkk0/mvt0R4ZwAUtfWOlBdXR2HHXZY/PVf/3Xcc889UV5eHvfcc09EqAOQuj3Vgeeffz5Wr14dAwcOjPLy8igvL4+VK1fGd77znRg8eHBEqAPQGXTkOsFnP/vZ6NOnT7z11lsRoRZA6vZUB/r06RPl5eV7zBbycb2wXYHHCSecEEuWLGk17c0334xBgwZFRMQhhxwSffv2jaeffjo3f/369TF//vwYNWpURESMGjUq1q1bFy+99FKuza9//etoaWmJ448/vj3DAYpgT3WgLT73uc9FRUVFq1qxZMmSePvtt3O1AihtbakFO8KOpUuXxlNPPRW9e/du1d45AaSto+cELS0t0dzcHBHqAKRuT3Vg/Pjx8eqrr8aiRYtyr/79+8fVV18dTzzxRESoA9AZdOSc4J133ok1a9ZEv379IkItgNTtqQ5UVlbG5z//+U9tk7frhW1+vHmWZQsWLMjKy8uzG2+8MVu6dGn2k5/8JDvggAOyH//4x7k2N910U9azZ8/s5z//efbqq69mZ5xxRnbIIYdkW7ZsybUZO3ZsNmLEiGz+/PnZ3LlzsyFDhmTnnntue4YCFElb6sCaNWuyV155JXv00UeziMgeeuih7JVXXslWrVqVa3PppZdmAwcOzH79619nCxcuzEaNGpWNGjWqGKsEdMCeasG2bduy008/PfvMZz6TLVq0KFu1alXu1dzcnOvHOQGka091YOPGjdk111yTzZs3L1uxYkW2cOHCbMKECVlVVVX2+uuv5/pRByBdbfnb4C8NGjQomzFjRqtp6gCkbU+1YMOGDdl3v/vdbN68edny5cuzp556KjvuuOOyIUOGZFu3bs31oxZAutpyTjB79uysoqIiu/vuu7OlS5dmt99+e9a1a9fs+eefz7XJx/XCdgUeWZZlv/jFL7Kjjjoqq6qqyoYNG5bdfffdrea3tLRkU6ZMyRoaGrKqqqrslFNOyZYsWdKqzZo1a7J
zzz03q6mpyWpra7MJEyZkGzZsaO9QgCLZUx247777sojY6XXdddfl2mzZsiX79re/nfXq1Ss74IADsrPOOqtVIAKUvk+rBcuXL99lHYiI7Jlnnsm1c04Aafu0OrBly5bsrLPOyvr3759VVlZm/fr1y04//fRswYIFrfpQByBte/rb4C/tKvBQByB9n1YLNm/enI0ePTo76KCDsoqKimzQoEHZJZdckjU2NrbqQy2AtLXlnOCee+7JDjvssKxbt27ZMccck/3sZz9rNT8f1wvLsizL2vHtFAAAAAAAgJLTrmd4AAAAAAAAlCKBBwAAAAAAkDyBBwAAAAAAkDyBBwAAAAAAkDyBBwAAAAAAkDyBBwAAAAAAkDyBBwAAAAAAkDyBBwAAsNcuuuiiOPPMM4s9DAAAYD9WXuwBAAAApa2srOxT51933XVx2223RZZl+2hEAAAAOxN4AAAAn2rVqlW5f//Hf/xHTJ06NZYsWZKbVlNTEzU1NcUYGgAAQI5bWgEAAJ+qb9++uVddXV2UlZW1mlZTU7PTLa1OOumkuOyyy+LKK6+MXr16RUNDQ8ycOTM2bdoUEyZMiB49esRhhx0Wjz/+eKtlvf7663HaaadFTU1NNDQ0xPjx4+ODDz7Y16sMAAAkSOABAAAUxP333x99+vSJBQsWxGWXXRYTJ06Ms88+O77whS/Eyy+/HKNHj47x48fH5s2bIyJi3bp18aUvfSlGjBgRCxcujF/+8pfx/vvvxznnnFPkNQEAAFIg8AAAAArimGOOiWuvvTaGDBkS11xzTXTr1i369OkTl1xySQwZMiSmTp0aa9asiVdffTUiIu64444YMWJETJs2LYYNGxYjRoyIe++9N5555pl48803i7w2AABAqfMMDwAAoCCOPvro3L+7du0avXv3juHDh+emNTQ0RETE6tWrIyLid7/7XTzzzDO7fB7IsmXLYujQoQUeMQAAkDKBBwAAUBAVFRWtfi4rK2s1raysLCIiWlpaIiJi48aN8ZWvfCWmT5++U1/9+vUr4EgBAIDOQOABAACUhOOOOy7+8z//MwYPHhzl5f5UAQAA2sczPAAAgJIwadKkWLt2bZx77rnx29/+NpYtWxZPPPFETJgwIbZv317s4QEAACVO4AEAAJSE/v37xwsvvBDbt2+P0aNHx/Dhw+PKK6+Mnj17Rpcu/nQBAAA+XVmWZVmxBwEAAAAAALA3/DcpAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgedWHukUAAABXSURBVAIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgef8P+AlOrStvWy0AAAAASUVORK5CYII=", - "text/plain": [ - "" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } + "23d4e25ec6c541818d5927b69576d278": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "27f6f437c5264368bc2c679942ad1e53": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + 
"grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "28004251b0e44a6c9dfa7ce1b30dcb18": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_e98cf7a63c814ffd94f69928f0700ebf", + "IPY_MODEL_6a4dee55cbae4959bd7fe3c4d92242b1", + "IPY_MODEL_8dba487876124827919079519406ecb8" ], - "source": [ - "mapping = metric.optimal_mapping(groundtruth, diarization)\n", - "diarization.rename_labels(mapping=mapping)" - ] + "layout": "IPY_MODEL_5c211704f90946afbae2f66a7586ce70" + } }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 230 - }, - "id": "Z0ewsLlQYnqB", - "outputId": "8a8cd040-ee1d-48f7-d4be-eef9e08e9e55", - "vscode": { - "languageId": "python" - } - }, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nO3de3RV5Zk/8CeQCzEhCRcDwXJTRLygoHQs6kztVAHrEmtntFJEpY5WB7V2quO4vNE6WmlrqaOzdGS0Y50q01bbsVbrXSvC4JVaKwuRCrZKoCYSAkJAsn9/8OPUlEsSck5Odvh81spaZO/3vPvd593nOfucL9m7IEmSJAAAAAAAAFKsR74HAAAAAAAA0FECDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYHHDpx99tlRUFCw3c9bb72103WTJk3KPH7YsGE7bHPjjTdm2rzzzjtx4oknxl577RXV1dVx2WWXxUcffZRZv3LlyvjSl74UI0eOjB49esQll1yy3TgfeOCBGDduXFRVVUVZWVmMGTMm7rnnntw+OSmXlrmNiFizZk3MmDEjampqoqSkJEaOHBkPP/xw7p6cFNs2d+eff/5262bMmBEFBQVx9tlnt2jbVeZ5m7lz50ZBQUF8/vOf7+Cz0b1le64jIubPnx+f+9znok+fPtGrV68YPXp0fO9734stW7a0aFdfXx9Tp06NioqKqKqqinPOOSfWrVuXWb9x48Y4++yzY/To0VFYWLjDuXzmmWd2OKba2tosPDsAAAAAe7bCfGz0g/WbOnV7fcqK2/2YSZMmxQ9+8IMWy/bee++drispKWnx+ze/+c0499xzWyzr3bt3RERs2bIlTjzxxBg4cGDMnz8/Vq5cGWeeeWYUFRXFDTfcEBERTU1Nsffee8dVV10Vs2fP3uEY+/btG1deeWWMGjUqiouL46GHHorp06dHdXV1TJw4sd373FENTQ2dur3Kksrdelwa5nbTpk1x/PHHR3V1dfz0pz+NffbZJ1asWBFVVVW7tc8dtaFhY6dur7SyV7sfM3jw4Jg7d27Mnj07SktLI2LrF9D33ntvDBkypEXbrjLP2yxfvjwuvfTS+Ou//ut273c2bamr69Tt9ezXb7cel825/tnPfhannXZaTJ8+PZ5++umoqqqKJ554Iv75n/85FixYED/+8Y+joKAgIiKmTp0aK1eujMcffzw2b94c06dPj/POOy/uvffeiNh6XJSWlsbFF18c999//y73YcmSJVFRUZH5vbq6ereeCwAAAAD+LC+BxwnffrpTt/d/32j/l/8lJSUxcODAdq/bpnfv3jtt89hjj8Ubb7wRTzzxRAwYMCDGjBkT1113XVx++eUxc+bMKC4ujmHDhsXNN98cERF33XXXDvs59thjW/z+1a9+Ne6+++6YN29eXgKPaY98qVO39+Dnf7lbj0vD3N51111RX18f8+fPj6KioojY+lcH+fLDM3/aqdv7yv+e0e7HHH744bFs2bJ44IEHYurUqRGx9a+ghgwZEsOHD2/RtqvMc8TWL8mnTp0a3/jGN+K5556LNWvWtGe3s6r20DGdur193v3Dbj0uW3O9fv36OPfcc2Py5Mlxxx13ZJb/wz/8QwwYMCAmT54cP/7xj+OLX/xiLF68OH71q1/Fiy++GOPGjYuIiFtuuSU+97nPxXe/+90YNGhQlJWVxW233RYREc8///wu57K6ujpvASYAAABAd+WSVnmwYMGCGD16dAwYMCCzbOLEibF27dr43e9+t1t9JkkSTz75Z
CxZsiT+5m/+JltDpZ2yNbcPPvhgjB8/PmbMmBEDBgyIQw45JG644YbtLrFDS1/+8pdb/G/+u+66K6ZPn5717WTzNfzNb34zqqur45xzzsn2MLu1bMz1Y489FnV1dXHppZdut+6kk06KkSNHxn333RcRW+e8qqoqE3ZERBx33HHRo0ePWLhwYbvHP2bMmKipqYnjjz8+nn/++XY/HgAAAIDtCTx24qGHHory8vLMz6mnnrrTdeXl5ZnL2Gxz+eWXb9fmueeei4iI2traFl+URkTm9/Zex72hoSHKy8ujuLg4TjzxxLjlllvi+OOP351d3mOkYW5///vfx09/+tPYsmVLPPzww3H11VfHTTfdFP/6r/+6u7u9RzjjjDNi3rx5sWLFilixYkU8//zzccYZ2/+1SFeZ53nz5sWdd94Zc+bMae+u7vGyMddvvvlmREQceOCBO9zGqFGjMm1qa2u3u+xUYWFh9O3bt11zXlNTE7fffnvcf//9cf/998fgwYPj2GOPjVdeeaXNfQAAAACwY3m5pFUafOYzn8lcmiQioqysbKfrIrbeT+PjLrvsssyNc7fZZ599sj7O3r17x6JFi2LdunXx5JNPxj/90z/Fvvvuu93lrvizNMxtc3NzVFdXxx133BE9e/aMI444It599934zne+E9dee21Wt9Wd7L333nHiiSfGf/3Xf0WSJHHiiSdG//79t2vXFea5sbExpk2bFnPmzNnhGNm1bM51kiQ5HevHHXDAAXHAAQdkfj/qqKNi2bJlMXv27Ljnnns6bRwAAAAA3VFeAo9H/vkz+dhsu5SVlcWIESPavW6b/v3777TNwIED44UXXmixbNWqVZl17dGjR4/MdsaMGROLFy+Ob33rW3kJPO454d5O3+buSMPc1tTURFFRUfTs2TOz7MADD4za2trYtGlTFBcXt7mvbDjzh3/fqdvriC9/+ctx4YUXRkTEv//7v++wTVeY52XLlsXy5cvjpJNOyixrbm6OiK1/ObBkyZLYb7/92tRXtgx8bVGnbq+jOjrXI0eOjIiIxYsXx1FHHbXd+sWLF8dBBx0UEVvndfXq1S3Wf/TRR1FfX9/uuv2X/uqv/irmzZvXoT4AAAAAyFPg0aesc7+s7WrGjx8f119/faxevTpziZTHH388KioqMl+u7a7m5uZoamrKxjDbrbKkMi/b7UqyNbdHH3103HvvvdHc3Bw9emy98tybb74ZNTU1nR52RESUVvbq9G3urkmTJsWmTZuioKAgJk6cmJNtZGOeR40aFb/97W9bLLvqqquisbExbr755hg8eHDWx92anv36dfo2O6Kjcz1hwoTo27dv3HTTTdsFHg8++GAsXbo0rrvuuojYOudr1qyJl19+OY444oiIiHjqqaeiubk5jjzyyA7tx6JFi6KmpqZDfQAAAADgkla7pampabtrthcWFra4nEpjY+N2bfbaa6+oqKiICRMmxEEHHRTTpk2Lb3/721FbWxtXXXVVzJgxI0pKSjLtFy3a+r+t161bF3/6059i0aJFUVxcnPlC9Vvf+laMGzcu9ttvv2hqaoqHH3447rnnnu0u30LbdZW5veCCC+LWW2+Nr371q3HRRRfF0qVL44YbboiLL744V7vebfTs2TMWL16c+feOdIV57tWrVxxyyCEt+q+qqoqI2G45O9bRuS4rK4v/+I//iNNPPz3OO++8uPDCC6OioiKefPLJuOyyy+Lv//7v47TTTouIrX9hNWnSpDj33HPj9ttvj82bN8eFF14Yp59+egwaNCjT9xtvvBGbNm2K+vr6aGxszBwDY8aMiYiI73//+zF8+PA4+OCDY+PGjfGf//mf8dRTT8Vjjz2W9ecHAAAAYI+TsJ2zzjorOfnkk3e6LiK2+znggAMybYYOHbrDNl/5ylcybZYvX56ccMIJSWlpadK/f//k61//erJ58+YW29pRH0OHDs2sv/LKK5MRI0YkvXr1Svr06ZOMHz8+mTt3bnafjG4mLXObJEkyf/785Mgjj0xKSkqSfffdN7n++uuTjz76KHtPRjeyq3lNkiQ5+eSTk7POOivTtivNc3v2g+zPdZIkya9//etk4sSJSUVFRVJcXJwcfPDByXe/+93tXm91dXXJlClTkvLy8qSioiKZPn160tjY2KLNzo6dbWbNmpXst99+Sa9evZK+ffsmxx57bPLUU0918FkBAAAAIEmSpCBJOvFurQAAAAAAADnQI98DAAAAAAAA6CiBBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKlXmKuOm5ub47333ovevXtHQUFBrjYDAAAAAACkQJIk0djYGIMGDYoePbL/9xg5Czzee++9GDx4cK66BwAAAAAAUugPf/hDfOITn8h6vzkLPHr37h0RWwdeUVGRq80AAAAAAAApsHbt2hg8eHAmP8i2nAUe2y5jVVFRIfAAAAAAAAAiInJ2Gww3LQcAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6OQ881td/mOtNQLf0x5Xvxnd/clP8ceW7+R4K3Uz9xvq4d/GPon5jfURsrdMv3febrNbrXPSZZltWrYq1N30vtqxale+hZE132KdsHadpOt7/8vXf0XaQbbk49j7e5/uNTTHn6bfi/camrPXPnq2zjil1GSC3ukud7S77EdG99oXOlfPAY8OajbneBHRLtXW18euip6K2rjbfQ6Gb+WBjfcxdcm988P9PGj78YEO8PPe38eEHG7K2jVz0mWZbVq+Oxu/Nji2rV+d7KFnTHfYpW8dpmo73v3z9d7QdZFsujr2P9/l+Y1Pc+cwygQdZ01nHlLoMkFvdpc52l/2I6F77QudySSsAAAAAACD1BB4AAAAAAEDqFeZ6A03rN8WGBpe1gvba9OHmiIhY/9H6aGhqyPNo6E7WbVq3w+VN67JXr5vWbcpKP91N85qG2FJXl+9hZEXzmu5Tlzp67KfxeF+3ad0u31t2Viegs7R2jLa3r7/UuGFzfLA+fa9dup7GDZs7dXvZfG0A8Gfd7fy3O7xfdLc5ofPkPPB49Ppno7SoNNebgW6noV99xOSIm5bPilie79GwJ/jlNU/mewjdXt3pU/I9BHZgTzz2r55/Zb6HALuU62P0oh++lNP+IVfUbwDawvsFezKXtAIAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEi9nN/DY+KVn45hhwzJ9Wag23l1yaKY996j8fVhl8eYAw/L93Do
RpY3vL3D63me+M3PRr9hfbKyjbrlH+yR90VoTb+590XRQQfmexhZsfmNxd3mniQdPfbTeLxfd9T1Maxy+E7X76xOQGdp7Rhtjx0dz7ecOS5GDOydlf7Zs71V29ip94TJ5msDgD/rbue/3eH9orvNCZ0n54FHSVlxlFb2yvVmoNsp3qsoIiLKCsuisqQyz6OhOykvLt/h8pLy7NXrkvLirPTT3fSoqoye/frlexhZsaWq+9Sljh77aTzey4vLd/nesrM6AZ2ltWO0vX39pd6lRdGnLH2vXbqe3qVFnbq9bL42APiz7nb+2x3eL7rbnNB5XNIKAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6vWcOXPmzFx03NTUFDfeeGP8y7/8S/SucpMZaK/mLUk0Ll8fnxr1qajoXZHv4dDN9OpZGqP3PjRKC0sjIqKotDAGHTIgirN4481c9JlmBWVlUTJ+fPQo7z7vid1hn7J1nKbpeP/L139H20G25eLY+3ifpcU944jhfWOvksKs9c+erbOOKXUZILe6S53tLvsR0b32hT/blhtcccUVUVJSkvX+C5IkSbLea0SsXbs2Kisro6GhISoqfFkLAAAAAAB7slznBi5pBQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8uoH6jfVx7+IfRf3G+i7RT67taJwdHfv7jU0x5+m34v3Gpqy0y5Zcba+z94PcM6fp1hXmryuMAei61AjypS3n+bvzWWDLqlWx9qbvxZZVq3ZrXB19PLBVe95fvBd1nnw91+Y4+zyndEV1OT4eBR7dwAcb62Puknvjgw4GFdnqJ9d2NM6Ojv39xqa485llbQo82tIuW3K1vc7eD3LPnKZbV5i/rjAGoOtSI8iXtpzn785ngS2rV0fj92bHltWrd2tcHX08sFV73l+8F3WefD3X5jj7PKd0RXXrBB4AAAAAAAC7JPAAAAAAAABST+ABAAAAAACkXmG+B0D2rNu0LhqaGjr0+DT5+P5ma+yNGzbHB+s37XJ9PrQ2rt3pj+4p28cKnaMrvSYdQ8COdKU6xZ5pV591OvJZoHlNQ2ypq9utxwHZ05ZzUO9Fna+zPxuY49zxOY+upHHDRzntX+DRjVw9/8p8D6FT5WJ/L/rhS1nvMxu66rjoehwrdJRjCICuKFefdepOn5KTfoH2cQ7aNZmX7sNc0pV81LQ+p/27pBUAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDquYdHN3LdUdfHsMrhu/345Q1vp+o+IB/f32yN/ZYzx8WIgb13uv6t2sa8XPewtXG1V772g9zL9rFC5+hKr0nHELAjXalOsWfa1WedjnwW6Df3vig66MB2P27zG4vd/wOyqC3noN6LOl9nfzYwx7njcx5dyaKl78Xfzspd/wKPbqS8uDwqSyo79Pg0+fj+ZmvsvUuLok9Z8S7X50Nr49qd/uiesn2s0Dm60mvSMQTsSFeqU+yZdvVZpyOfBXpUVUbPfv3a/bgtVbv/uQvYXlvOQb0Xdb7O/mxgjnPH5zy6kt6luY0kXNIKAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9XrOnDlzZi46bmpqihtvvDGuuOKKKCkpycUm+JhePUtj9N6HRmlhaZfoJ9d2NM6Ojr20uGccMbxv7FWy6+vItbVdtuRqe529H+SeOU23rjB/XWEMQNelRpAvbTnP353PAgVlZVEyfnz0KN+9e4B09PHAVu15f/Fe1Hny9Vyb4+zznNLVNDU1xc3f+07OcoOCJEmSrPcaEWvXro3KyspoaGiIioqKXGwCAAAAAABIiVznBi5pBQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN47CHeb2yKOU+/Fe83NnWoDQDs6eo31se9i38U9Rvrd7kMAAAA6FwCjz3E+41Nceczy1oNPFprAwB7ug821sfcJffGBx8LN3a0DAAAAOhcAg8AAAAAACD1BB4AAAAAAEDqFeZ7AHSuxg2b44P1m3a6DgBom3Wb1kVDU0Pm3wAAAEB+CTz2MBf98KV8DwEAuoWr51+Z7yEAAAAAH+OSVgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKnnHh57mFvOHBcjBvbe4bq3ahvd4wMA2ui6o66PYZXDIyJiecPb7ukBAAAAeSbw2MP0Li2KPmXFO10HALRNeXF5VJZUZv4NAAAA5JdLWgEAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gcceon/vkjjn2P2if++SDrUBgD1dn1594/QDvhR9evXd5TIAAACgcxUkSZLkouO1a9dGZWVlNDQ0REVFRS42AQAAAAAApESucwN/4QEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1CnPVcZIkERGxdu3aXG0CAAAAAABIiW15wbb8INtyFnjU1dVFRMTgwYNztQkAAAAAACBl6urqorKyMuv95izw6Nu3b0REvPPOOzkZOJAOa9eujcGDB8cf/vCHqKioyPdwgDxQBwB1AIhQCwB1AIhoaGiIIUOGZPKDbMtZ4NGjx9bbg1RWVipgQFRUVKgFsIdTBwB1AIhQCwB1APhzfpD1fnPSKwAAAAAAQCcSeAAAAAAAAKnXc+bMmTNz1nnPnnHsscdGYWHOrpwFpIBaAKgDgDoARKgFgDoA5LYOFCRJkmS9VwAAAAAAgE7kklYAAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABSr92Bx7vvvhtnnHFG9OvXL0pLS2P06NHx0ksvZdYnSRL
XXHNN1NTURGlpaRx33HGxdOnSFn3U19fH1KlTo6KiIqqqquKcc86JdevWdXxvgE7RWh144IEHYsKECdGvX78oKCiIRYsWbdfHxo0bY8aMGdGvX78oLy+Pv/u7v4tVq1Z15m4AHbSrWrB58+a4/PLLY/To0VFWVhaDBg2KM888M957770WfTgngHRr7Zxg5syZMWrUqCgrK4s+ffrEcccdFwsXLmzRhzoA6dZaHfi4888/PwoKCuL73/9+i+XqAKRfa7Xg7LPPjoKCghY/kyZNatGHWgDp1pZzgsWLF8fkyZOjsrIyysrK4pOf/GS88847mfXZ+L6wXYHHBx98EEcffXQUFRXFI488Em+88UbcdNNN0adPn0ybb3/72/Fv//Zvcfvtt8fChQujrKwsJk6cGBs3bsy0mTp1avzud7+Lxx9/PB566KH49a9/Heedd167Bg7kR1vqwPr16+OYY46JWbNm7bSfr33ta/GLX/wifvKTn8Szzz4b7733XnzhC1/ojF0AsqC1WvDhhx/GK6+8EldffXW88sor8cADD8SSJUti8uTJLfpxTgDp1ZZzgpEjR8att94av/3tb2PevHkxbNiwmDBhQvzpT3/KtFEHIL3aUge2+dnPfhb/93//F4MGDdpunToA6dbWWjBp0qRYuXJl5ue+++5rsV4tgPRqSx1YtmxZHHPMMTFq1Kh45pln4rXXXourr746evXqlWmTle8Lk3a4/PLLk2OOOWan65ubm5OBAwcm3/nOdzLL1qxZk5SUlCT33XdfkiRJ8sYbbyQRkbz44ouZNo888khSUFCQvPvuu+0ZDpAHrdWBj3v77beTiEheffXVFsvXrFmTFBUVJT/5yU8yyxYvXpxERLJgwYKsjhfIjfbUgm1eeOGFJCKSFStWJEninADSbnfqQENDQxIRyRNPPJEkiToAadfWOvDHP/4x2WeffZLXX389GTp0aDJ79uzMOnUA0q8tteCss85KTj755J2uVwsg3dpSB774xS8mZ5xxxk7XZ+v7wnb9hceDDz4Y48aNi1NPPTWqq6tj7NixMWfOnMz6t99+O2pra+O4447LLKusrIwjjzwyFixYEBERCxYsiKqqqhg3blymzXHHHRc9evTY7s/bga6ntTrQFi+//HJs3ry5Ra0YNWpUDBkyJFMrgK5td2pBQ0NDFBQURFVVVUQ4J4C0a28d2LRpU9xxxx1RWVkZhx12WESoA5B2bakDzc3NMW3atLjsssvi4IMP3q4PdQDSr63nBM8880xUV1fHAQccEBdccEHU1dVl1qkFkG6t1YHm5ub45S9/GSNHjoyJEydGdXV1HHnkkfHzn/880yZb3xe2K/D4/e9/H7fddlvsv//+8eijj8YFF1wQF198cdx9990REVFbWxsREQMGDGjxuAEDBmTW1dbWRnV1dYv1hYWF0bdv30wboOtqrQ60RW1tbRQXF2e+9Nzm47UC6NraWws2btwYl19+eUyZMiUqKioiwjkBpF1b68BDDz0U5eXl0atXr5g9e3Y8/vjj0b9//4hQByDt2lIHZs2aFYWFhXHxxRfvsA91ANKvLbVg0qRJ8cMf/jCefPLJmDVrVjz77LNxwgknxJYtWyJCLYC0a60OrF69OtatWxc33nhjTJo0KR577LE45ZRT4gtf+EI8++yzEZG97wsL2zPw5ubmGDduXNxwww0RETF27Nh4/fXX4/bbb4+zzjqrPV0BKaUOABHtqwWbN2+O0047LZIkidtuuy0fwwVyoK114DOf+UwsWrQo3n///ZgzZ06cdtppsXDhwu2+1ADSp7U68PLLL8fNN98cr7zyShQUFOR5tECutOWc4PTTT8+0Hz16dBx66KGx3377xTPPPBOf/exn8zJuIHtaqwPNzc0REXHyySfH1772tYiIGDNmTMyfPz9uv/32+PSnP521sbTrLzxqamrioIMOarHswAMPzNxJfeDAgRER2905fdWqVZl1AwcOjNWrV7dY/9FHH0V9fX2mDdB1tVYH2mLgwIGxadOmWLNmTYvlH68VQNfW1lqwLexYsWJFPP7445vo3U4AAAbjSURBVJm/7ohwTgBp19Y6UFZWFiNGjIhPfepTceedd0ZhYWHceeedEaEOQNq1Vgeee+65WL16dQwZMiQKCwujsLAwVqxYEV//+tdj2LBhEaEOQHewO98T7LvvvtG/f/946623IkItgLRrrQ70798/CgsLW80WsvF9YbsCj6OPPjqWLFnSYtmbb74ZQ4cOjYiI4cOHx8CBA+PJJ5/MrF+7dm0sXLgwxo8fHxER48ePjzVr1sTLL7+cafPUU09Fc3NzHHnkke0ZDpAHrdWBtjjiiCOiqKioRa1YsmRJvPPOO5laAXRtbakF28KOpUuXxhNPPBH9+vVr0d45AaTb7p4TNDc3R1NTU0SoA5B2rdWBadOmxWuvvRaLFi3K/AwaNCguu+yyePTRRyNCHYDuYHfOCf74xz9GXV1d1NTURIRaAGnXWh0oLi6OT37yk7tsk7XvC9t8e/MkSV544YWksLAwuf7665OlS5cmP/rRj5K99tor+e///u9MmxtvvDGpqqpK/vd//zd57bXXkpNPPjkZPnx4smHDhkybSZMmJWPHjk0WLlyYzJs3L9l///2TKVOmtGcoQJ60pQ7U1dUlr776avLLX/4yiYhk7ty5yauvvpqsXLky0+b8889PhgwZkjz11FPJSy+9lIwfPz4ZP358PnYJ2A2t1YJNmzYlkydPTj7xiU8kixYtSlauXJn5aWpqyvTjnADSq7U6sG7duuSKK65IFixYkCxfvjx56aWXkunTpyclJSXJ66+/nulHHYD0astng780dOjQZPbs2S2WqQOQbq3VgsbGxuTSSy9NFixYkLz99tvJE088kRx++OHJ/vvvn2zcuDHTj1oA6dWWc4IHHnggKSoqSu64445k6dKlyS233JL07Nkzee655zJtsvF9YbsCjyRJkl/84hfJIYcckpSUlCSjRo1K7rjjjhbrm5ubk6uvvjoZMGBAUlJSknz2s59NlixZ0qJNXV1dMmXKlKS8vDypqKhIpk+fnjQ2NrZ3KECetFYHfvCDHyQRsd3Ptddem2mzYcOG5B//8R+TPn36JHvttVdyyimntAhEgK5vV7Xg7bff3mEdiIjk6aefzrRzTgDptqs6sGHDhuSUU05JBg0alBQXFyc1NTXJ5MmTkxdeeKFFH+oApFtrnw3+0o4CD3UA0m9XteDDDz9MJkyYkOy9995JUVFRMnTo0OTcc89NamtrW/ShFkC6teWc4M4770xGjBiR9OrVKznssMOSn//85y3WZ+P7woIkSZJ2/HUKAAAAAABAl9Oue3gAAAAAAAB0RQIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAA6LCzzz47Pv/5z+d7GAAAwB6sMN8DAAAAuraCgoJdrr/22mvj5ptvjiRJOmlEAAAA2xN4AAAAu7Ry5crMv//nf/4nrrnmmliyZElmWXl5eZSXl+djaA
AAABkuaQUAAOzSwIEDMz+VlZVRUFDQYll5efl2l7Q69thj46KLLopLLrkk+vTpEwMGDIg5c+bE+vXrY/r06dG7d+8YMWJEPPLIIy229frrr8cJJ5wQ5eXlMWDAgJg2bVq8//77nb3LAABACgk8AACAnLj77rujf//+8cILL8RFF10UF1xwQZx66qlx1FFHxSuvvBITJkyIadOmxYcffhgREWvWrIm//du/jbFjx8ZLL70Uv/rVr2LVqlVx2mmn5XlPAACANBB4AAAAOXHYYYfFVVddFfvvv39cccUV0atXr+jfv3+ce+65sf/++8c111wTdXV18dprr0VExK233hpjx46NG264IUaNGhVjx46Nu+66K55++ul4880387w3AABAV+ceHgAAQE4ceuihmX/37Nkz+vXrF6NHj84sGzBgQERErF69OiIifvOb38TTTz+9w/uBLFu2LEaOHJnjEQMAAGkm8AAAAHKiqKioxe8FBQUtlhUUFERERHNzc0RErFu3Lk466aSYNWvWdn3V1NTkcKQAAEB3IPAAAAC6hMMPPzzuv//+GDZsWBQW+qgCAAC0j3t4AAAAXcKMGTOivr4+pkyZEi+++GIsW7YsHn300Zg+fXps2bIl38MDAAC6OIEHAADQJQwaNCief/752LJlS0yYMCFGjx4dl1xySVRVVUWPHj66AAAAu1aQJEmS70EAAAAAAAB0hP8mBQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1/h//ffiQt3ubxgAAAABJRU5ErkJggg==", - "text/plain": [ - "" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } + "2b2d7912186a49dd9891ae12c77482c7": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "2cbf0faadd4842c8b22e10541ff9de4e": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "2d7a0b901d7044d5b1f273a3e9bea560": { + 
"model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "304e9682570b4abeb1719001c04449d6": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "32accb0adfa24c62a75c15c8ec88df8c": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_23d4e25ec6c541818d5927b69576d278", + "max": 128619, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_54d9456703324160aced03ee5fef2943", + "value": 128619 + } + }, + "333b42ca7aa44788b1c22724eb11bcc3": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "338747810ac74b4e83e356a01459c8a5": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_e4c1e9affaba4045a3ec903091b6f454", + "max": 500, + "min": 0, + "orientation": "horizontal", + "style": 
"IPY_MODEL_1946386483ed4947a2184cdb4ea6e434", + "value": 500 + } + }, + "341615c971b04033b7293d82fc40f35c": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "3499ef4dd9f243d9bef00b396e78ed69": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "358c3a67f8b54c4c899e095611fa116b": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "38b3054ad59549e4b4f2de4697139a87": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": 
"IPY_MODEL_0adb304bf90f4079a4031caea1cfb924", + "placeholder": "​", + "style": "IPY_MODEL_40021e0b59fe4e1e9bac351dbec57c6c", + "value": "Downloading: 100%" + } + }, + "3bd33a372aad4c438f64d73c97f14c6a": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "40021e0b59fe4e1e9bac351dbec57c6c": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "404f7ce06a01470fbb0b747981d00e84": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_38b3054ad59549e4b4f2de4697139a87", + "IPY_MODEL_7d90af87c9574f5ca21fca058c39bf02", + "IPY_MODEL_fee75343289f42fb8d6dfb4bf26fe368" ], - "source": [ - "groundtruth" - ] + "layout": "IPY_MODEL_f21c0c6379d74898ac6aadcb6fc14a8a" + } }, - { - "cell_type": "markdown", - "metadata": { - "id": "MxlrTbyPYnqB" - }, - "source": [ - "# Going further \n", - "\n", - "We have only scratched the surface in this introduction. 
\n", - "\n", - "More details can be found in the [`pyannote.audio` Github repository](https://github.com/pyannote/pyannote-audio).\n" - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "include_colab_link": true, - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "0125df9fa8e14b3db0e2bce299529812": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "FloatProgressModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_407e250e244b4985b1ce8c9d32a8af7d", - "max": 318, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_8127c4258e374ad986ce1f8b4c70f704", - "value": 318 - } - }, - "0821b47ae70444dfa38b84719c4836a6": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "0adb304bf90f4079a4031caea1cfb924": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "0b4bf8076fdf4d19843a3246c8bd61ac": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "0d10fb0edc9144b1a1fc1f2c9e322410": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": 
"@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_d33fba0d78fb41f983c55f5cd2a0a740", - "placeholder": "​", - "style": "IPY_MODEL_fd47487fc8734594823f8afa00c4239d", - "value": "Downloading: 100%" - } - }, - "0d80273cabbc42ba9a408fb1144151c9": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "ProgressStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "0e382d66f09f4958a40baa7ab83c4ccb": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "13525aa369a9410a83343952ab511f3c": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "1600b9cd09c446e581b7912e35c9f56e": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "16c0017f65b649f5ac5bebf1c955a1fd": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - 
"align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "17856a72e4e948039a66c51e8244cb50": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "183c55d5d3ce4058ae338c81344547c5": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HBoxModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_70efa83bf3ea45b4bd8cc41f57613328", - "IPY_MODEL_338747810ac74b4e83e356a01459c8a5", - "IPY_MODEL_ac0bcfa1ef6e4e78a7769c4cb2e8762f" - ], - "layout": "IPY_MODEL_6efb7939bb954dc8ba116680139eb257" - } - }, - "1946386483ed4947a2184cdb4ea6e434": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "ProgressStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "23d4e25ec6c541818d5927b69576d278": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - 
"overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "27f6f437c5264368bc2c679942ad1e53": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "28004251b0e44a6c9dfa7ce1b30dcb18": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HBoxModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_e98cf7a63c814ffd94f69928f0700ebf", - "IPY_MODEL_6a4dee55cbae4959bd7fe3c4d92242b1", - "IPY_MODEL_8dba487876124827919079519406ecb8" - ], - "layout": "IPY_MODEL_5c211704f90946afbae2f66a7586ce70" - } - }, - "2b2d7912186a49dd9891ae12c77482c7": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "2cbf0faadd4842c8b22e10541ff9de4e": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - 
"_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "2d7a0b901d7044d5b1f273a3e9bea560": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "304e9682570b4abeb1719001c04449d6": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "ProgressStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "32accb0adfa24c62a75c15c8ec88df8c": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "FloatProgressModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_23d4e25ec6c541818d5927b69576d278", - "max": 128619, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_54d9456703324160aced03ee5fef2943", - "value": 128619 - } - }, - "333b42ca7aa44788b1c22724eb11bcc3": { - "model_module": "@jupyter-widgets/controls", - 
"model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "338747810ac74b4e83e356a01459c8a5": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "FloatProgressModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_e4c1e9affaba4045a3ec903091b6f454", - "max": 500, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_1946386483ed4947a2184cdb4ea6e434", - "value": 500 - } - }, - "341615c971b04033b7293d82fc40f35c": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "3499ef4dd9f243d9bef00b396e78ed69": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "358c3a67f8b54c4c899e095611fa116b": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - 
"grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "38b3054ad59549e4b4f2de4697139a87": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_0adb304bf90f4079a4031caea1cfb924", - "placeholder": "​", - "style": "IPY_MODEL_40021e0b59fe4e1e9bac351dbec57c6c", - "value": "Downloading: 100%" - } - }, - "3bd33a372aad4c438f64d73c97f14c6a": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "40021e0b59fe4e1e9bac351dbec57c6c": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "404f7ce06a01470fbb0b747981d00e84": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HBoxModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_38b3054ad59549e4b4f2de4697139a87", - "IPY_MODEL_7d90af87c9574f5ca21fca058c39bf02", - "IPY_MODEL_fee75343289f42fb8d6dfb4bf26fe368" - ], - "layout": "IPY_MODEL_f21c0c6379d74898ac6aadcb6fc14a8a" - } - }, - "407e250e244b4985b1ce8c9d32a8af7d": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - 
"grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "41eb32a6fef141ff9cc3ce6e4d771822": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HBoxModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_0d10fb0edc9144b1a1fc1f2c9e322410", - "IPY_MODEL_32accb0adfa24c62a75c15c8ec88df8c", - "IPY_MODEL_bf299285318b4a04a88569cc581ecd75" - ], - "layout": "IPY_MODEL_ac2950d08fc145ba9eb9cf5824b1ee18" - } - }, - "549a30c85c47466eadedbd24da42e304": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "54d9456703324160aced03ee5fef2943": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "ProgressStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "554e567a83b348f88092c6ba01830930": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": 
null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "5c211704f90946afbae2f66a7586ce70": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "5e2c207db5424f91829bf5c52040a9f2": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "603e99f45afb4910a99f7684ffd21b6a": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_b26354d0278f447d92c7e1ad4c211d64", - "placeholder": "​", - "style": "IPY_MODEL_3bd33a372aad4c438f64d73c97f14c6a", - "value": "Downloading: 100%" - } - }, - "6242493d251a47609c0c44f1dbe82958": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - 
"grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "652e97509a914f3b914665c4889c6d11": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "ProgressStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "67fcc38a1e5d4eb39381685447e397de": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "6a45ce374e2e47ba9457d02e02522748": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "ButtonStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ButtonStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "button_color": null, - "font_weight": "" - } - }, - "6a4dee55cbae4959bd7fe3c4d92242b1": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "FloatProgressModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_d7071582bfbe4ec4b2c3c9843e5481ae", - "max": 1921, - "min": 0, - "orientation": "horizontal", - "style": 
"IPY_MODEL_0d80273cabbc42ba9a408fb1144151c9", - "value": 1921 - } - }, - "6e334cad2e94462cae6e722bd6f11a9e": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "6ede83f870a24e71b5182fcc458cdc42": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "6efb7939bb954dc8ba116680139eb257": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "70abdfd99be84f7b9b8d24fee9eec022": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": 
null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "70efa83bf3ea45b4bd8cc41f57613328": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_6242493d251a47609c0c44f1dbe82958", - "placeholder": "​", - "style": "IPY_MODEL_f439c1de68ac4c799d81fdb29d053d10", - "value": "Downloading: 100%" - } - }, - "74bf69aa6eaa4a8594b2ea9a0fb20957": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_ebc9801e164a44b3b6f8dc7f590e1c79", - "placeholder": "​", - "style": "IPY_MODEL_0821b47ae70444dfa38b84719c4836a6", - "value": " 17.7M/17.7M [00:00<00:00, 54.3MB/s]" - } - }, - "764aa53d75324d73ab06936c52fd8fc8": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "ProgressStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "765485a1d3f941d28b79782dcffbf401": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": 
null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "77a361d1ff214e8799891bbeb28a0789": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "79184c8c2a6f4b7493bb7f6983f18a09": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": "center", - "align_self": null, - "border": null, - "bottom": null, - "display": "flex", - "flex": null, - "flex_flow": "column", - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": "50%" - } - }, - "7d90af87c9574f5ca21fca058c39bf02": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "FloatProgressModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_ed169fd606274f2ebbb3e8f32ab42431", - "max": 1920, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_304e9682570b4abeb1719001c04449d6", - "value": 1920 - } - }, - "8011d68253ac4080a637659ef3383dc4": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HBoxModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_e928540e99564d808cb2d12c92daa498", - "IPY_MODEL_fc9a3c4ae0a947ec91a227360a80f602", - "IPY_MODEL_f91dcd9f30c743d69f9d4b7e8d1beba5" - ], - "layout": "IPY_MODEL_6ede83f870a24e71b5182fcc458cdc42" - } - }, - "8127c4258e374ad986ce1f8b4c70f704": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "ProgressStyleModel", - "state": { - 
"_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "859b12a6d95b4c6f987791ca848122b9": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_ea95ffd922c0455d957120f034e541f8", - "placeholder": "​", - "style": "IPY_MODEL_13525aa369a9410a83343952ab511f3c", - "value": "

Copy a token from your Hugging Face\ntokens page and paste it below.
Immediately click login after copying\nyour token or it might be stored in plain text in this notebook file.
" - } - }, - "8dba487876124827919079519406ecb8": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_67fcc38a1e5d4eb39381685447e397de", - "placeholder": "​", - "style": "IPY_MODEL_0b4bf8076fdf4d19843a3246c8bd61ac", - "value": " 1.92k/1.92k [00:00<00:00, 63.2kB/s]" - } - }, - "94756148d2e94a93ae233baba20af683": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "PasswordModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "PasswordModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "PasswordView", - "continuous_update": true, - "description": "Token:", - "description_tooltip": null, - "disabled": false, - "layout": "IPY_MODEL_b2be65e192384c948fb8987d4cfca505", - "placeholder": "​", - "style": "IPY_MODEL_333b42ca7aa44788b1c22724eb11bcc3", - "value": "" - } - }, - "99898e6ee64a46bd832af112e79b58b7": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_765485a1d3f941d28b79782dcffbf401", - "placeholder": "​", - "style": "IPY_MODEL_3499ef4dd9f243d9bef00b396e78ed69", - "value": "\nPro Tip: If you don't already have one, you can create a dedicated\n'notebooks' token with 'write' access, that you can then easily reuse for all\nnotebooks. 
" - } - }, - "a02030ba8f324d93a7ed6cc793d70a3b": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "a899f4bc6ed842d397723cca582669e6": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_341615c971b04033b7293d82fc40f35c", - "placeholder": "​", - "style": "IPY_MODEL_17856a72e4e948039a66c51e8244cb50", - "value": " 5.53M/5.53M [00:00<00:00, 21.7MB/s]" - } - }, - "ab32c7daa1d9404fb921f39fbc4fc05c": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "aba21021d3bb4565a58ffa40049810db": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - 
"padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "ac0bcfa1ef6e4e78a7769c4cb2e8762f": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_549a30c85c47466eadedbd24da42e304", - "placeholder": "​", - "style": "IPY_MODEL_bedc7d916b9745f097094c5c51a81f06", - "value": " 500/500 [00:00<00:00, 5.05kB/s]" - } - }, - "ac2950d08fc145ba9eb9cf5824b1ee18": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "b26354d0278f447d92c7e1ad4c211d64": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "b2be65e192384c948fb8987d4cfca505": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": 
null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "ba18cded436e486da34882d821d8f1eb": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "ButtonModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ButtonModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ButtonView", - "button_style": "", - "description": "Login", - "disabled": false, - "icon": "", - "layout": "IPY_MODEL_0e382d66f09f4958a40baa7ab83c4ccb", - "style": "IPY_MODEL_6a45ce374e2e47ba9457d02e02522748", - "tooltip": "" - } - }, - "bacfb50c001047c4824a05c9f2ee2e40": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "bcf766d2a2c641f0aa2af596c7da1b18": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "FloatProgressModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_ee537ee5470f4d7b816a8c8f96948b4d", - "max": 17719103, - "min": 0, - "orientation": "horizontal", - "style": 
"IPY_MODEL_652e97509a914f3b914665c4889c6d11", - "value": 17719103 - } - }, - "bedc7d916b9745f097094c5c51a81f06": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "bf299285318b4a04a88569cc581ecd75": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_bacfb50c001047c4824a05c9f2ee2e40", - "placeholder": "​", - "style": "IPY_MODEL_c53a1cf68fcd4388abf1f0379891089a", - "value": " 129k/129k [00:00<00:00, 155kB/s]" - } - }, - "c3358d32ac814ea6bc5714402c5bc62d": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HBoxModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_ecd8e5e364d34ea8bfbba4fbd467384d", - "IPY_MODEL_0125df9fa8e14b3db0e2bce299529812", - "IPY_MODEL_e3169ca885e04536a709d5751173ce9a" - ], - "layout": "IPY_MODEL_70abdfd99be84f7b9b8d24fee9eec022" - } - }, - "c53a1cf68fcd4388abf1f0379891089a": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "c8731777ce834e58a76a295076200cfc": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "VBoxModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "VBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "VBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_859b12a6d95b4c6f987791ca848122b9", - "IPY_MODEL_94756148d2e94a93ae233baba20af683", - "IPY_MODEL_ba18cded436e486da34882d821d8f1eb", - "IPY_MODEL_99898e6ee64a46bd832af112e79b58b7" - ], - "layout": "IPY_MODEL_79184c8c2a6f4b7493bb7f6983f18a09" - } - }, - "c8e0c9a60ef34d2caee9d55a3c21c3d4": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - 
"align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "c9974003727a401797953ef2885db5a2": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "d12f07e25bf5422facc38c3463700994": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HBoxModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_eae11f84c2644ada8295b445c924baec", - "IPY_MODEL_bcf766d2a2c641f0aa2af596c7da1b18", - "IPY_MODEL_74bf69aa6eaa4a8594b2ea9a0fb20957" - ], - "layout": "IPY_MODEL_2d7a0b901d7044d5b1f273a3e9bea560" - } - }, - "d13ba6030aff42bca48c72ff071c44c0": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "FloatProgressModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_c8e0c9a60ef34d2caee9d55a3c21c3d4", - "max": 5534328, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_764aa53d75324d73ab06936c52fd8fc8", - "value": 5534328 - } - }, - "d182e37b4a404158bee8446fc2728bd9": { - "model_module": "@jupyter-widgets/controls", - 
"model_module_version": "1.5.0", - "model_name": "HBoxModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_603e99f45afb4910a99f7684ffd21b6a", - "IPY_MODEL_d13ba6030aff42bca48c72ff071c44c0", - "IPY_MODEL_a899f4bc6ed842d397723cca582669e6" - ], - "layout": "IPY_MODEL_a02030ba8f324d93a7ed6cc793d70a3b" - } - }, - "d33fba0d78fb41f983c55f5cd2a0a740": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "d7071582bfbe4ec4b2c3c9843e5481ae": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "e1c9df12fa034c93a9b3530ea4a7c5aa": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - 
"e3169ca885e04536a709d5751173ce9a": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_358c3a67f8b54c4c899e095611fa116b", - "placeholder": "​", - "style": "IPY_MODEL_e1c9df12fa034c93a9b3530ea4a7c5aa", - "value": " 318/318 [00:00<00:00, 11.0kB/s]" - } - }, - "e4c1e9affaba4045a3ec903091b6f454": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "e7728d9c55e44274966f8f6dbc445c54": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "ProgressStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "e928540e99564d808cb2d12c92daa498": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_c9974003727a401797953ef2885db5a2", - "placeholder": "​", - "style": "IPY_MODEL_77a361d1ff214e8799891bbeb28a0789", - "value": "Downloading: 100%" - } - }, - "e98cf7a63c814ffd94f69928f0700ebf": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - 
"layout": "IPY_MODEL_aba21021d3bb4565a58ffa40049810db", - "placeholder": "​", - "style": "IPY_MODEL_f7812fa7fbf744c1b261b985d085e28e", - "value": "Downloading: 100%" - } - }, - "ea95ffd922c0455d957120f034e541f8": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "eae11f84c2644ada8295b445c924baec": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_2cbf0faadd4842c8b22e10541ff9de4e", - "placeholder": "​", - "style": "IPY_MODEL_ab32c7daa1d9404fb921f39fbc4fc05c", - "value": "Downloading: 100%" - } - }, - "ebc9801e164a44b3b6f8dc7f590e1c79": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "ecd8e5e364d34ea8bfbba4fbd467384d": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": 
"@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_554e567a83b348f88092c6ba01830930", - "placeholder": "​", - "style": "IPY_MODEL_6e334cad2e94462cae6e722bd6f11a9e", - "value": "Downloading: 100%" - } - }, - "ed169fd606274f2ebbb3e8f32ab42431": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "ee537ee5470f4d7b816a8c8f96948b4d": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "f21c0c6379d74898ac6aadcb6fc14a8a": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - 
"grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "f439c1de68ac4c799d81fdb29d053d10": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "f7812fa7fbf744c1b261b985d085e28e": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "f91dcd9f30c743d69f9d4b7e8d1beba5": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_2b2d7912186a49dd9891ae12c77482c7", - "placeholder": "​", - "style": "IPY_MODEL_1600b9cd09c446e581b7912e35c9f56e", - "value": " 83.3M/83.3M [00:01<00:00, 60.9MB/s]" - } - }, - "fc9a3c4ae0a947ec91a227360a80f602": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "FloatProgressModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_27f6f437c5264368bc2c679942ad1e53", - "max": 83316686, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_e7728d9c55e44274966f8f6dbc445c54", - "value": 83316686 - } - }, - "fd47487fc8734594823f8afa00c4239d": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "fee75343289f42fb8d6dfb4bf26fe368": { - "model_module": 
"@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_16c0017f65b649f5ac5bebf1c955a1fd", - "placeholder": "​", - "style": "IPY_MODEL_5e2c207db5424f91829bf5c52040a9f2", - "value": " 1.92k/1.92k [00:00<00:00, 48.3kB/s]" - } - } - } + "407e250e244b4985b1ce8c9d32a8af7d": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "41eb32a6fef141ff9cc3ce6e4d771822": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_0d10fb0edc9144b1a1fc1f2c9e322410", + "IPY_MODEL_32accb0adfa24c62a75c15c8ec88df8c", + "IPY_MODEL_bf299285318b4a04a88569cc581ecd75" + ], + "layout": "IPY_MODEL_ac2950d08fc145ba9eb9cf5824b1ee18" + } + }, + "549a30c85c47466eadedbd24da42e304": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + 
"min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "54d9456703324160aced03ee5fef2943": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "554e567a83b348f88092c6ba01830930": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "5c211704f90946afbae2f66a7586ce70": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "5e2c207db5424f91829bf5c52040a9f2": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + 
"_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "603e99f45afb4910a99f7684ffd21b6a": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_b26354d0278f447d92c7e1ad4c211d64", + "placeholder": "​", + "style": "IPY_MODEL_3bd33a372aad4c438f64d73c97f14c6a", + "value": "Downloading: 100%" + } + }, + "6242493d251a47609c0c44f1dbe82958": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "652e97509a914f3b914665c4889c6d11": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "67fcc38a1e5d4eb39381685447e397de": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + 
"object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "6a45ce374e2e47ba9457d02e02522748": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ButtonStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ButtonStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "button_color": null, + "font_weight": "" + } + }, + "6a4dee55cbae4959bd7fe3c4d92242b1": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_d7071582bfbe4ec4b2c3c9843e5481ae", + "max": 1921, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_0d80273cabbc42ba9a408fb1144151c9", + "value": 1921 + } + }, + "6e334cad2e94462cae6e722bd6f11a9e": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "6ede83f870a24e71b5182fcc458cdc42": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "6efb7939bb954dc8ba116680139eb257": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + 
"align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "70abdfd99be84f7b9b8d24fee9eec022": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "70efa83bf3ea45b4bd8cc41f57613328": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_6242493d251a47609c0c44f1dbe82958", + "placeholder": "​", + "style": "IPY_MODEL_f439c1de68ac4c799d81fdb29d053d10", + "value": "Downloading: 100%" + } + }, + "74bf69aa6eaa4a8594b2ea9a0fb20957": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_ebc9801e164a44b3b6f8dc7f590e1c79", + "placeholder": "​", + "style": "IPY_MODEL_0821b47ae70444dfa38b84719c4836a6", + "value": " 17.7M/17.7M [00:00<00:00, 54.3MB/s]" + } + }, + "764aa53d75324d73ab06936c52fd8fc8": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + 
"model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "765485a1d3f941d28b79782dcffbf401": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "77a361d1ff214e8799891bbeb28a0789": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "79184c8c2a6f4b7493bb7f6983f18a09": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": "center", + "align_self": null, + "border": null, + "bottom": null, + "display": "flex", + "flex": null, + "flex_flow": "column", + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": "50%" + } + }, + "7d90af87c9574f5ca21fca058c39bf02": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": 
"@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_ed169fd606274f2ebbb3e8f32ab42431", + "max": 1920, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_304e9682570b4abeb1719001c04449d6", + "value": 1920 + } + }, + "8011d68253ac4080a637659ef3383dc4": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_e928540e99564d808cb2d12c92daa498", + "IPY_MODEL_fc9a3c4ae0a947ec91a227360a80f602", + "IPY_MODEL_f91dcd9f30c743d69f9d4b7e8d1beba5" + ], + "layout": "IPY_MODEL_6ede83f870a24e71b5182fcc458cdc42" + } + }, + "8127c4258e374ad986ce1f8b4c70f704": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "859b12a6d95b4c6f987791ca848122b9": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_ea95ffd922c0455d957120f034e541f8", + "placeholder": "​", + "style": "IPY_MODEL_13525aa369a9410a83343952ab511f3c", + "value": "

Copy a token from your Hugging Face\ntokens page and paste it below.
Immediately click login after copying\nyour token or it might be stored in plain text in this notebook file.
" + } + }, + "8dba487876124827919079519406ecb8": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_67fcc38a1e5d4eb39381685447e397de", + "placeholder": "​", + "style": "IPY_MODEL_0b4bf8076fdf4d19843a3246c8bd61ac", + "value": " 1.92k/1.92k [00:00<00:00, 63.2kB/s]" + } + }, + "94756148d2e94a93ae233baba20af683": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "PasswordModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "PasswordModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "PasswordView", + "continuous_update": true, + "description": "Token:", + "description_tooltip": null, + "disabled": false, + "layout": "IPY_MODEL_b2be65e192384c948fb8987d4cfca505", + "placeholder": "​", + "style": "IPY_MODEL_333b42ca7aa44788b1c22724eb11bcc3", + "value": "" + } + }, + "99898e6ee64a46bd832af112e79b58b7": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_765485a1d3f941d28b79782dcffbf401", + "placeholder": "​", + "style": "IPY_MODEL_3499ef4dd9f243d9bef00b396e78ed69", + "value": "\nPro Tip: If you don't already have one, you can create a dedicated\n'notebooks' token with 'write' access, that you can then easily reuse for all\nnotebooks. 
" + } + }, + "a02030ba8f324d93a7ed6cc793d70a3b": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "a899f4bc6ed842d397723cca582669e6": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_341615c971b04033b7293d82fc40f35c", + "placeholder": "​", + "style": "IPY_MODEL_17856a72e4e948039a66c51e8244cb50", + "value": " 5.53M/5.53M [00:00<00:00, 21.7MB/s]" + } + }, + "ab32c7daa1d9404fb921f39fbc4fc05c": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "aba21021d3bb4565a58ffa40049810db": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + 
"padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "ac0bcfa1ef6e4e78a7769c4cb2e8762f": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_549a30c85c47466eadedbd24da42e304", + "placeholder": "​", + "style": "IPY_MODEL_bedc7d916b9745f097094c5c51a81f06", + "value": " 500/500 [00:00<00:00, 5.05kB/s]" + } + }, + "ac2950d08fc145ba9eb9cf5824b1ee18": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "b26354d0278f447d92c7e1ad4c211d64": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "b2be65e192384c948fb8987d4cfca505": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": 
null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "ba18cded436e486da34882d821d8f1eb": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ButtonModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ButtonModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ButtonView", + "button_style": "", + "description": "Login", + "disabled": false, + "icon": "", + "layout": "IPY_MODEL_0e382d66f09f4958a40baa7ab83c4ccb", + "style": "IPY_MODEL_6a45ce374e2e47ba9457d02e02522748", + "tooltip": "" + } + }, + "bacfb50c001047c4824a05c9f2ee2e40": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "bcf766d2a2c641f0aa2af596c7da1b18": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_ee537ee5470f4d7b816a8c8f96948b4d", + "max": 17719103, + "min": 0, + "orientation": "horizontal", + "style": 
"IPY_MODEL_652e97509a914f3b914665c4889c6d11", + "value": 17719103 + } + }, + "bedc7d916b9745f097094c5c51a81f06": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "bf299285318b4a04a88569cc581ecd75": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_bacfb50c001047c4824a05c9f2ee2e40", + "placeholder": "​", + "style": "IPY_MODEL_c53a1cf68fcd4388abf1f0379891089a", + "value": " 129k/129k [00:00<00:00, 155kB/s]" + } + }, + "c3358d32ac814ea6bc5714402c5bc62d": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_ecd8e5e364d34ea8bfbba4fbd467384d", + "IPY_MODEL_0125df9fa8e14b3db0e2bce299529812", + "IPY_MODEL_e3169ca885e04536a709d5751173ce9a" + ], + "layout": "IPY_MODEL_70abdfd99be84f7b9b8d24fee9eec022" + } + }, + "c53a1cf68fcd4388abf1f0379891089a": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "c8731777ce834e58a76a295076200cfc": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "VBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "VBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "VBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_859b12a6d95b4c6f987791ca848122b9", + "IPY_MODEL_94756148d2e94a93ae233baba20af683", + "IPY_MODEL_ba18cded436e486da34882d821d8f1eb", + "IPY_MODEL_99898e6ee64a46bd832af112e79b58b7" + ], + "layout": "IPY_MODEL_79184c8c2a6f4b7493bb7f6983f18a09" + } + }, + "c8e0c9a60ef34d2caee9d55a3c21c3d4": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + 
"align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "c9974003727a401797953ef2885db5a2": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "d12f07e25bf5422facc38c3463700994": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_eae11f84c2644ada8295b445c924baec", + "IPY_MODEL_bcf766d2a2c641f0aa2af596c7da1b18", + "IPY_MODEL_74bf69aa6eaa4a8594b2ea9a0fb20957" + ], + "layout": "IPY_MODEL_2d7a0b901d7044d5b1f273a3e9bea560" + } + }, + "d13ba6030aff42bca48c72ff071c44c0": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_c8e0c9a60ef34d2caee9d55a3c21c3d4", + "max": 5534328, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_764aa53d75324d73ab06936c52fd8fc8", + "value": 5534328 + } + }, + "d182e37b4a404158bee8446fc2728bd9": { + "model_module": "@jupyter-widgets/controls", + 
"model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_603e99f45afb4910a99f7684ffd21b6a", + "IPY_MODEL_d13ba6030aff42bca48c72ff071c44c0", + "IPY_MODEL_a899f4bc6ed842d397723cca582669e6" + ], + "layout": "IPY_MODEL_a02030ba8f324d93a7ed6cc793d70a3b" + } + }, + "d33fba0d78fb41f983c55f5cd2a0a740": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "d7071582bfbe4ec4b2c3c9843e5481ae": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "e1c9df12fa034c93a9b3530ea4a7c5aa": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + 
"e3169ca885e04536a709d5751173ce9a": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_358c3a67f8b54c4c899e095611fa116b", + "placeholder": "​", + "style": "IPY_MODEL_e1c9df12fa034c93a9b3530ea4a7c5aa", + "value": " 318/318 [00:00<00:00, 11.0kB/s]" + } + }, + "e4c1e9affaba4045a3ec903091b6f454": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "e7728d9c55e44274966f8f6dbc445c54": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "e928540e99564d808cb2d12c92daa498": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_c9974003727a401797953ef2885db5a2", + "placeholder": "​", + "style": "IPY_MODEL_77a361d1ff214e8799891bbeb28a0789", + "value": "Downloading: 100%" + } + }, + "e98cf7a63c814ffd94f69928f0700ebf": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + 
"layout": "IPY_MODEL_aba21021d3bb4565a58ffa40049810db", + "placeholder": "​", + "style": "IPY_MODEL_f7812fa7fbf744c1b261b985d085e28e", + "value": "Downloading: 100%" + } + }, + "ea95ffd922c0455d957120f034e541f8": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "eae11f84c2644ada8295b445c924baec": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_2cbf0faadd4842c8b22e10541ff9de4e", + "placeholder": "​", + "style": "IPY_MODEL_ab32c7daa1d9404fb921f39fbc4fc05c", + "value": "Downloading: 100%" + } + }, + "ebc9801e164a44b3b6f8dc7f590e1c79": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "ecd8e5e364d34ea8bfbba4fbd467384d": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": 
"@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_554e567a83b348f88092c6ba01830930", + "placeholder": "​", + "style": "IPY_MODEL_6e334cad2e94462cae6e722bd6f11a9e", + "value": "Downloading: 100%" + } + }, + "ed169fd606274f2ebbb3e8f32ab42431": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "ee537ee5470f4d7b816a8c8f96948b4d": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "f21c0c6379d74898ac6aadcb6fc14a8a": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + 
"grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "f439c1de68ac4c799d81fdb29d053d10": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "f7812fa7fbf744c1b261b985d085e28e": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "f91dcd9f30c743d69f9d4b7e8d1beba5": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_2b2d7912186a49dd9891ae12c77482c7", + "placeholder": "​", + "style": "IPY_MODEL_1600b9cd09c446e581b7912e35c9f56e", + "value": " 83.3M/83.3M [00:01<00:00, 60.9MB/s]" + } + }, + "fc9a3c4ae0a947ec91a227360a80f602": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_27f6f437c5264368bc2c679942ad1e53", + "max": 83316686, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_e7728d9c55e44274966f8f6dbc445c54", + "value": 83316686 + } + }, + "fd47487fc8734594823f8afa00c4239d": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "fee75343289f42fb8d6dfb4bf26fe368": { + "model_module": 
"@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_16c0017f65b649f5ac5bebf1c955a1fd", + "placeholder": "​", + "style": "IPY_MODEL_5e2c207db5424f91829bf5c52040a9f2", + "value": " 1.92k/1.92k [00:00<00:00, 48.3kB/s]" + } } - }, - "nbformat": 4, - "nbformat_minor": 0 + } + } + }, + "nbformat": 4, + "nbformat_minor": 1 } From 1760cf55f9faa3f4eb003c293795035c56c7bf1d Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Fri, 6 Oct 2023 11:09:55 +0200 Subject: [PATCH 02/23] intro tutorial with pyannote version 3.0 --- tutorials/intro.ipynb | 97 +++++++++++++++---------------------------- 1 file changed, 34 insertions(+), 63 deletions(-) diff --git a/tutorials/intro.ipynb b/tutorials/intro.ipynb index 7082d76c9..aeae6b730 100644 --- a/tutorials/intro.ipynb +++ b/tutorials/intro.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "9-KmdPlBYnp6" @@ -10,6 +11,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "1Fs2d8otYnp7" @@ -27,6 +29,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "tckHJKZnYnp7" @@ -43,10 +46,7 @@ "base_uri": "https://localhost:8080/" }, "id": "ai082p4HYnp7", - "outputId": "bb673846-8b58-4743-cea2-6c6270632d7f", - "vscode": { - "languageId": "python" - } + "outputId": "bb673846-8b58-4743-cea2-6c6270632d7f" }, "outputs": [], "source": [ @@ -55,6 +55,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "qggK-7VBYnp8" @@ -69,10 +70,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "uJWoQiJgYnp8", - "vscode": { - "languageId": "python" - } + "id": "uJWoQiJgYnp8" }, "outputs": [], "source": [ @@ -81,6 +79,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "EPIapoCJYnp8" @@ -94,10 +93,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "Mmm0Q22JYnp8", - "vscode": { - "languageId": "python" - } + "id": "Mmm0Q22JYnp8" }, "outputs": [], "source": [ @@ -113,10 +109,7 @@ "height": 233 }, "id": "ToqCwl_FYnp9", - "outputId": "a1d9631f-b198-44d1-ff6d-ec304125a9f4", - "vscode": { - "languageId": "python" - } + "outputId": "a1d9631f-b198-44d1-ff6d-ec304125a9f4" }, "outputs": [ { @@ -141,6 +134,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "p_R9T9Y5Ynp9" @@ -158,10 +152,7 @@ "height": 230 }, "id": "bAHza4Y1Ynp-", - "outputId": "c4cc2369-bfe4-4ac2-bb71-37602e7c7a8a", - "vscode": { - "languageId": "python" - } + "outputId": "c4cc2369-bfe4-4ac2-bb71-37602e7c7a8a" }, "outputs": [ { @@ -187,6 +178,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "L3FQXT5FYnp-" @@ -204,10 +196,7 @@ "height": 62 }, "id": "rDhZ3bXEYnp-", - "outputId": "a82efe4e-2f9c-48bd-94fb-c62af3a3cb43", - "vscode": { - "languageId": "python" - } + "outputId": "a82efe4e-2f9c-48bd-94fb-c62af3a3cb43" }, "outputs": [ { @@ -237,6 +226,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "hkzox7QIYnp_" @@ -250,6 +240,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "3hmFmLzFYnp_" @@ -263,10 +254,7 @@ "execution_count": null, "metadata": { 
"id": "xC05jFO_Ynp_", - "outputId": "c5502632-56ae-4adb-8bdc-112deedc8893", - "vscode": { - "languageId": "python" - } + "outputId": "c5502632-56ae-4adb-8bdc-112deedc8893" }, "outputs": [ { @@ -326,6 +314,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "ctw4nLaPYnp_" @@ -337,6 +326,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "x9AQgDzFYnp_" @@ -356,10 +346,7 @@ "execution_count": null, "metadata": { "id": "iZaFudpDYnp_", - "outputId": "981274fa-e654-4091-c838-91c81f921e5d", - "vscode": { - "languageId": "python" - } + "outputId": "981274fa-e654-4091-c838-91c81f921e5d" }, "outputs": [ { @@ -411,6 +398,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "5MclWK2GYnp_" @@ -422,8 +410,8 @@ "\n", "To load the speaker diarization pipeline, \n", "\n", - "* accept the user conditions on [hf.co/pyannote/speaker-diarization](https://hf.co/pyannote/speaker-diarization)\n", - "* accept the user conditions on [hf.co/pyannote/segmentation](https://hf.co/pyannote/segmentation)\n", + "* accept the user conditions on [hf.co/pyannote/speaker-diarization-3.0](https://hf.co/pyannote/speaker-diarization-3.0)\n", + "* accept the user conditions on [hf.co/pyannote/segmentation-3.0](https://hf.co/pyannote/segmentation-3.0)\n", "* login using `notebook_login` below" ] }, @@ -452,10 +440,7 @@ ] }, "id": "r5u7VMb-YnqB", - "outputId": "c714a997-d4f8-417a-e5ad-0a4924333859", - "vscode": { - "languageId": "python" - } + "outputId": "c714a997-d4f8-417a-e5ad-0a4924333859" }, "outputs": [ { @@ -571,10 +556,7 @@ ] }, "id": "lUq1UvoJYnqB", - "outputId": "8c052808-d0b2-4f2e-8771-f86114ae3fe3", - "vscode": { - "languageId": "python" - } + "outputId": "8c052808-d0b2-4f2e-8771-f86114ae3fe3" }, "outputs": [ { @@ -692,11 +674,12 @@ ], "source": [ "from pyannote.audio import Pipeline\n", - "pipeline = Pipeline.from_pretrained('pyannote/speaker-diarization', use_auth_token=True)\n", + "pipeline = Pipeline.from_pretrained('pyannote/speaker-diarization-3.0', use_auth_token=True)\n", "diarization = pipeline(DEMO_FILE)" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "xkicJRq-YnqB" @@ -714,10 +697,7 @@ "height": 230 }, "id": "DPosdyGrYnqB", - "outputId": "45a2315e-6841-4de4-e54e-1f3da7cf2d46", - "vscode": { - "languageId": "python" - } + "outputId": "45a2315e-6841-4de4-e54e-1f3da7cf2d46" }, "outputs": [ { @@ -737,6 +717,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "DLhErS6wYnqB" @@ -751,10 +732,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "vNHQRTUIYnqB", - "vscode": { - "languageId": "python" - } + "id": "vNHQRTUIYnqB" }, "outputs": [], "source": [ @@ -771,10 +749,7 @@ "base_uri": "https://localhost:8080/" }, "id": "9d0vKQ0fYnqB", - "outputId": "9a664753-cd84-4211-9153-d33e929bb252", - "vscode": { - "languageId": "python" - } + "outputId": "9a664753-cd84-4211-9153-d33e929bb252" }, "outputs": [ { @@ -790,6 +765,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "Xz5QJV9nYnqB" @@ -809,10 +785,7 @@ "height": 230 }, "id": "xMLf4mrYYnqB", - "outputId": "ed08bcc8-24c6-439c-a244-3a673ff480b0", - "vscode": { - "languageId": "python" - } + "outputId": "ed08bcc8-24c6-439c-a244-3a673ff480b0" }, "outputs": [ { @@ -841,10 +814,7 @@ "height": 230 }, "id": "Z0ewsLlQYnqB", - "outputId": "8a8cd040-ee1d-48f7-d4be-eef9e08e9e55", - "vscode": { - "languageId": "python" - } + "outputId": "8a8cd040-ee1d-48f7-d4be-eef9e08e9e55" }, "outputs": [ { @@ -864,6 
+834,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "MxlrTbyPYnqB" From c23e7c04bc97258a178b009bdb3ba32333a43477 Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Fri, 6 Oct 2023 11:17:42 +0200 Subject: [PATCH 03/23] setting pyannote import to version 3.0.1 --- tutorials/intro.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorials/intro.ipynb b/tutorials/intro.ipynb index aeae6b730..cb88384f7 100644 --- a/tutorials/intro.ipynb +++ b/tutorials/intro.ipynb @@ -50,7 +50,7 @@ }, "outputs": [], "source": [ - "!pip install -qq https://github.com/pyannote/pyannote-audio/archive/refs/heads/develop.zip\n", + "!pip install pyannote.audio==3.0.1\n", "!pip install -qq ipython==7.34.0" ] }, From 8f700ac8ebf78786c37f4318471f00a626258eef Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Tue, 10 Oct 2023 09:22:48 +0200 Subject: [PATCH 04/23] Using GPU in intro.ipynb when available --- tutorials/intro.ipynb | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tutorials/intro.ipynb b/tutorials/intro.ipynb index cb88384f7..1b8093be1 100644 --- a/tutorials/intro.ipynb +++ b/tutorials/intro.ipynb @@ -50,7 +50,7 @@ }, "outputs": [], "source": [ - "!pip install pyannote.audio==3.0.1\n", + "!pip install -qq pyannote.audio==3.0.1\n", "!pip install -qq ipython==7.34.0" ] }, @@ -673,8 +673,16 @@ } ], "source": [ + "# load pretrained pipeline\n", "from pyannote.audio import Pipeline\n", "pipeline = Pipeline.from_pretrained('pyannote/speaker-diarization-3.0', use_auth_token=True)\n", + "\n", + "# send pipeline to GPU (when available)\n", + "import torch\n", + "if torch.cuda.is_available():\n", + " pipeline.to(torch.device('cuda'))\n", + "\n", + "# run the pipeline\n", "diarization = pipeline(DEMO_FILE)" ] }, From 1f19793738dea23454f799dfc015f6ac4e5bd05e Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Tue, 10 Oct 2023 15:42:22 +0200 Subject: [PATCH 05/23] Using GPU in intro.ipynb when available --- tutorials/intro.ipynb | 254 ++++++++---------------------------------- 1 file changed, 49 insertions(+), 205 deletions(-) diff --git a/tutorials/intro.ipynb b/tutorials/intro.ipynb index 1b8093be1..93c7f623f 100644 --- a/tutorials/intro.ipynb +++ b/tutorials/intro.ipynb @@ -1,7 +1,6 @@ { "cells": [ { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "9-KmdPlBYnp6" @@ -11,7 +10,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "1Fs2d8otYnp7" @@ -29,7 +27,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "tckHJKZnYnp7" @@ -55,7 +52,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "qggK-7VBYnp8" @@ -68,7 +64,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "metadata": { "id": "uJWoQiJgYnp8" }, @@ -79,7 +75,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "EPIapoCJYnp8" @@ -91,7 +86,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "metadata": { "id": "Mmm0Q22JYnp8" }, @@ -102,7 +97,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "metadata": { "colab": { "base_uri": "https://localhost:8080/", @@ -114,12 +109,12 @@ "outputs": [ { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABiYAAADyCAYAAADJJ33UAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nOzdeXxU1f3/8XfWyWTfSAIYdtmhIFh+4Nq6l0dbv92+ttS131b7xa2LtdYNtSp1rWIrgrsVqIq461dUXJBFRNljAoRNIAlknewhub8/6Iwzk9mXO0l4PR8PHiT3nnvuuWf53HPnwJ04wzAMAQAAAAAAAAAAmCA+1gUAAAAAAAAAAADHDhYmAAAAAAAAAACAaViYAAAAAAAAAAAApmFhAgAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGlYmAAAAAAAAAAAAKZhYQIAAAAAAAAAAJiGhQkAAAAAAAAAAGAaFiYAAAAAAAAAAIBpWJgAAAAAAAAAAACmYWECAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAaViYAAAAAAAAAAAApmFhAgAAAAAAAAAAmOaYXJi45JJLFBcX1+3Pjh07vO4799xzHccPGTLEY5q5c+c60uzdu1czZ85UamqqCgoKdN111+nIkSOO/QcPHtQvfvELjRw5UvHx8br22mu7lfPll1/W1KlTlZ2drbS0NE2aNEnPPfdcdCunF+st7SpJdXV1mj17tvr37y+LxaKRI0fqrbfeil7l9HL29rviiiu67Zs9e7bi4uJ0ySWXuKTtKW1tt2TJEsXFxen8888Pszb6rki3syStWrVK3/ve95STk6OUlBRNmDBBDzzwgDo7O13S1dTUaNasWcrMzFR2drZ+9atfqbGx0bG/tbVVl1xyiSZMmKDExESP7fjhhx96LFNFRUUEagcAAAAAAKDvSIx0hrVN7ZHO0qectOSQjjv33HP11FNPuWzr16+f130Wi8Xl99tvv12//vWvXbZlZGRIkjo7OzVz5kwVFRVp1apVOnjwoC666CIlJSXprrvukiS1tbWpX79+uummm/Tggw96LGNubq5uvPFGjR49WsnJyXrjjTd06aWXqqCgQOecc05I1x2O+rZ6U8+XZckK+pje0K7t7e0666yzVFBQoJdeekkDBw7Unj17lJ2dHfT1RkpLfatp57JmpYR0XHFxsZYsWaIHH3xQVqtV0tEPixctWqRBgwa5pO0pbW23e/du/fGPf9Qpp5wS/IVHUGd1tannS8jLC/qYSLbzsmXL9LOf/UyXXnqpVqxYoezsbL333nv605/+pNWrV+uFF15QXFycJGnWrFk6ePCgli9fro6ODl166aX6zW9+o0WLFkk62iesVquuvvpqLV261Oc1lJaWKjMz0/F7QUFB0PUAAAAAAADQl0V8YeK8e1ZEOkuf1twW2gf0FotFRUVFQe+zy8jI8Jrm3Xff1bZt2/Tee++psLBQkyZN0h133KHrr79ec+bMUXJysoYMGaKHHnpIkvTkk096zOf00093+f2aa67RM888o5UrV8ZkYeLCt39h6vleO//NoI/pDe365JNPqqamRqtWrVJSUpKko/+CP5aevegl0851+au/DOm4E044QTt37tTLL7+sWbNmSTr6v4oGDRqkoUOHuqTtKW0tHf1Ae9asWbrtttv0ySefqK6uLpjLjqiKiZNMPd/A/fuCPiZS7dzU1KRf//rX+sEPfqAFCxY4tv/P//yPCgsL9YMf/EAvvPCC/vu//1slJSV65513tG7dOk2dOlWSNG/ePH3ve9/TfffdpwEDBigtLU2PPvqoJOnTTz/12Y4FBQUxXWgEAAAAAADo6Y7JVzlF2+rVqzVhwgQVFhY6tp1zzjlqaGjQ1q1bQ8rTMAy9//77Ki0t1amnnhqpoiIIkWrX1157TdOnT9fs2bNVWFio8ePH66677ur2ahl0d9lll7n8C/knn3xSl156acTPE8kxfPvtt6ugoEC/+tWvIl3MPisS7fzuu++qurpaf/zjH7vt+/73v6+RI0dq8eLFko62d3Z2tmNRQpLOPPNMxcfHa+3atUGXf9KkSerfv7/OOussffrpp0EfDwAAAAAA0NcdswsTb7zxhtLT0x1/fvrTn3rdl56e7nh9i93111/fLc0nn3wiSaqoqHD5QFOS4/dg3zVeX1+v9PR0JScna+bMmZo3b57OOuusUC75mNAb2rW8vFwvvfSSOjs79dZbb+nmm2/W/fffr7/+9a+hXvYx45e//KVWrlypPXv2aM+ePfr000/1y192/x8YPaWtV65cqSeeeEILFy4M9lKPaZFo57KyMknSmDFjPJ5j9OjRjjQVFRXdXreUmJio3NzcoNq7f//+mj9/vpYuXaqlS5equLhYp59+ur744ouA8wAAAAAAADgWRPxVTr3Fd77zHcdrOSQpLS3N6z7p6Pc9OLvuuuscX8JqN3DgwIiXMyMjQxs2bFBjY6Pef/99/f73v9ewYcO6veYJR/WGdu3q6lJBQYEWLFighIQETZkyRfv379e9996rW2+9NaLn6mv69eunmTNn6umnn5ZhGJo5c6by8/O7pesJbW2z2XThhRdq4cKFHssI7yLZzoZhRLWszkaNGqVRo0Y5fp8xY4Z27typBx98UM8995xp5QAAAAAAAOjpIr4w8fafvhPpLKMiLS1NI0aMCHqfXX5+vtc0RUVF+uyzz1y2VVZWOvYFIz4+3nGeSZMmqaSkRHfffXdMFiaeO2+R6ecMVm9o1/79+yspKUkJCQmObWPGjFFFRYXa29uVnBzaF7qH46Jnf2L6OUN12WWX6corr5Qk/eMf//CYpie09c6dO7V79259//vfd2zr6uqSdPRf45eWlmr48OEB5RUpRZs2mHq+cITbziNHjpQklZSUaMaMGd32l5SUaOzYsZKOtmlVVZXL/iNHjqimpibomO3u29/+tlauXBlWHgAAAAAAAH1NxBcmctLM/1C1p5k+fbruvPNOVVVVOV4Psnz5cmVmZjo+CAtVV1eX2traIlHMoGVZsmJy3p4iUu160kknadGiRerq6lJ8/NG3qZWVlal///4xWZSQJGtWSkzOG4pzzz1X7e3tiouLi9qXwEeirUePHq3Nmze7bLvppptks9n00EMPqbi4OOLl9ichL8/0c4Yq3HY+++yzlZubq/vvv7/bwsRrr72m7du364477pB0tL3r6uq0fv16TZkyRZL0wQcfqKurS9OmTQvrOjZs2KD+/fuHlQcAAAAAAEBfc8y+ysmXtra2bu8VT0xMdHmViM1m65YmNTVVmZmZOvvsszV27FhdeOGFuueee1RRUaGbbrpJs2fPlsVicaTfsOHov15ubGzUoUOHtGHDBiUnJzs++Lz77rs1depUDR8+XG1tbXrrrbf03HPPdXt1CQLTU9r1t7/9rR555BFdc801uuqqq7R9+3bddddduvrqq6N16X1KQkKCSkpKHD970hPaOiUlRePHj3fJPzs7W5K6bUd34bZzWl
qaHnvsMV1wwQX6zW9+oyuvvFKZmZl6//33dd111+knP/mJfvazn0k6+j+Wzj33XP3617/W/Pnz1dHRoSuvvFIXXHCBBgwY4Mh727Ztam9vV01NjWw2m6P9J02aJEn6+9//rqFDh2rcuHFqbW3V448/rg8++EDvvvtuxOsHAAAAAACgVzOOQRdffLHxwx/+0Os+Sd3+jBo1ypFm8ODBHtNcfvnljjS7d+82zjvvPMNqtRr5+fnGH/7wB6Ojo8PlXJ7yGDx4sGP/jTfeaIwYMcJISUkxcnJyjOnTpxtLliyJbGX0Ib2lXQ3DMFatWmVMmzbNsFgsxrBhw4w777zTOHLkSOQqo4/x1baGYRg//OEPjYsvvtiRtie1dTDXcayLdDsbhmF8/PHHxjnnnGNkZmYaycnJxrhx44z77ruv23irrq42fv7znxvp6elGZmamcemllxo2m80ljbd+Y/e3v/3NGD58uJGSkmLk5uYap59+uvHBBx+EWSsAAAAAAAB9T5xhmPjNoAAAAAAAAAAA4JgWH+sCAAAAAAAAAACAYwcLEwAAAAAAAAAAwDQsTAAAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADANCxMAAAAAAAAAAMA0iaEe2NXVpQMHDigjI0NxcXGRLBMAAAAAAAAAAOhlDMOQzWbTgAEDFB/v/f9FhLwwceDAARUXF4d6OAAAAAAAAAAA6IP27dun4447zuv+kBcmMjIyHCfIzMwMNRsAAAAAAAAAANAHNDQ0qLi42LF+4E3ICxP21zdlZmayMAEAAAAAAAAAACTJ79c/8OXXAAAAAAAAAADANCxMAAAAAAAAAAAA07AwAQAAAAAAAAAATMPCBAAAAAAAAAAAMA0LEwAAAAAAAAAAwDQsTAAAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADANCxMAAAAAAAAAAMA0LEwAAAAAAAAAAADTsDABAAAAAAAAAABMw8IEAAAAAAAAAAAwDQsTAAAAAAAAAADANCxMAAAAAAAAAAAA07AwAQAAAAAAAAAATMPCBAAAAAAAAAAAMA0LEwAAAAAAAAAAwDQsTAAAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADBNwpw5c+aEcmBbW5vmzp2rP/zwfB359wtKHD5M8enpAR9/2Nam51ft1uD8NKVaEr1ua9+yVTuu+qNe6ipUZl6WXv58nwbnp6m5vdORtrm9U0+s3KjNtuUakj1ILUdatGz7yxqYcZzj57SkdL296y3H3/Z9/9q0TO990an15XUaUZjhOK83Na01WlTyvL6s+kJDsobImmj1mdZeDns6T9uC0VlZqcbHFihx+DAZTU2On73VffuWrar57WwljRunhIICv/l7aoNw2fPMSEl0tF+k8u4N5+8pAq0He7riqj1qvuZq1Q8brSVltqjUm3t/O7Buk57865MaVJiljIGFXtNF22Fbmxau2KG1Ow4HFBd6Ek9l91Z/9u2DuprUMe8htX70kZLGjFaNkeSS/rCtTS+9uV4F/1qoqk/W6CVbhgYPyPGbt70cuWnJ+tenu7rVZ6D3gc7KSjXc/4CjfJ7inT02xmVmau8zS/RijcVvGSPF/b6QXN3gKEvTc/9yidHOMTyQe2awMT/WnMvb0h6vTa9sU9bATHW0dGjLsyvVseRZffHCFh3Y0yprv3SVvLNdWQMzlWxNCij/pppmR572Yzxtc97eld+pN75+3XHfX7p+qepXNCu/ONfjeb3l58zTXMC9z7vHWU/9sLyuXPesm6thWcOVk5ITVF0Hyn7e5LYjemXBZyocnKOsrBSv152UluRol46WDpe68JTGvY68tdGnr6zRxx0rNChnUEDzH/f5krc5lXs71LTWeJ3b+ZuDedvv6zjnfc7zT0/zvpYjLY7yZluyHfNR5+tznqeGMk+MFnv5co/ka+uLpdr35QHlDs72Ow797Qu1HIHWT6Bt6q2dhmQN0b5DHbrpxY0aWZQhQ4rI/eSwrU1PrNyodbWvaXP1hm7PFIHWma+x5dynXt6+NKBnF2fu4+tg40HdsfIBrdmcps92HVBJ83saku19PO8/eEAvzX9VtVsbVDA0X8nWJK/3tJb2eK1fvFH7vjyguCJDL+5+wW95a1pr9PgXi/XMJ9s1qihPuame74v2urY/J0ZiXHkb955igCe+Yn+w9wWz58jhOmxr07+Wl6lp1T6lZiar5J3t6srv1Gvrn1DtP/+hjFHjZGnt1KGHH9e2r7qUPTSv270oUuUIdK4f7Tq259/Z2aW/vrJFI4sylJdh6TZnDHYO6czbZxie+pt9W15Kvt7c9KbWLdqkhm2NShqQ4JhPRfL+FMrnI4HMEdyv/dM9G7R2W6KGF2S7PB8E8xmBc1+w1FU7nk/qBw3X4g1VXud9rUaDFpU8r9UHVmnz4U1BxeJg6inc+21TTbM+eWmz3tqwXU0PPajmdZ/rleZMxzOVLzsOH9TtH87XpqptWl+a7KjnYDiPy7QjXT7nraHwVz+dlZXadd88Pb+iTEMGF6g10RLQc26sPmMKNzaVHWzQ3Cc/1PEvP624VSs99mNP56po2as7Vj6gjaUZGlWU7/hs1j6WfI0p93PaP3uw1+fA5ERtf6tUSWlJWvPGV3pnf52GFmUGPDaD/czP+ThPn1eEo6a1RktXzlfD/Q+oYsMqPdL+fxqWO1KGDK/zhnA+L7Y/A326RSGNv0AdtrXp/vc+0SOfvqPh/bKUHJfuEuucr+2lbS/q9YWv64YbbpDFYvGaZ9j/Y6Jjxw7ZHnhQnVVVQV/MEx/u1GFbm89tHWVlqirZqae32VRe1ejY75z2sK1N/163Ta/tekG1rTWqba3RktJFLj/vte1x+du+7+Wv3tWyzyq1ePUel/N6U9tao1d3LtOrO5eptrXGb1r7uXxtC0ZnVZWjvp1/9qajrEzta9aoo6wsoPw9tUG47Hk6t5+ZYn3+niLQerCna9haovY1a1RZWh61enPvb5Wl5VpccIIqS8t9pou2w7Y2LV69J+C40JN4Kru3+rNvr92zX00LFqppwUJ1VlV1S3/Y1qa339+orqef1IGX39CTn1cGlLe9HOVVjR7rM9D7QGdVlUv5PLHHw46yMn393L8DKmOkuN8XnMviHqMDiduerivQmB9rzmVsrm3R+iWb1VzboubaFpW++JkaX31LZYeyteW9vardV+/YHyjnPH1tc95eUV3hct9/Z/M72vbSDq/n9ZafM09zAfc+H0jf3mvbo63VW7TXtifgOgiWI/bvqpXW7lfFgQaP6ezX7dwu7nXhKY23fNzbaN3yL7V074sBz3/c50ve5lTu7eBrbudvDuZtv6/jPM05vc37nMvrPB91Tue+vaewl6+q8pA2vfqVNr/6VUDj0N++UMsRaj/ytt1bO9W21qi8qlFf7qlVeVVjxO4n9meXd/e97vGZItA68zW2nPtUoM8u7sc7H7fXtkclVfv09hd1em1jqeO5y5uqykPqWCltf3O34zq83dOaa1sc/aqiuiKg8ta21uitH
mnCdW9/w3b3k/Juftify1wcr568La31cE21c99TGz+7tZ47gvCyVWhNvOkYpPh62J0ij/X1rqr7x96dmsL12LmQKpt74eb7xdX0dzP0mu89e+3M9iPVd3OX/qAJd9wfbBSMXaq579XHnNHfqBpM0Pr9bmiOQavL40H+uL8SRS11TsZXtQY7P46Jcn99R65jktsvzFhr+/WxpQPrzKCQAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGmOqe+YmHfRVI0oytCOCluffj9jJGTdfZfL90q4/w4AvUneksVKGjvG6/6ObSUxf7ftseKOGXdqSNZQj/t21++K2DtJY3XPn3n7Gcob4v/d695U767tM+8/nXfRVEne34l98hUn+nw3qb/9fYW/enI38/YzJLm+J9fe3yWZ0ud9jeNwRDIG9HT2WBHMmHduZyn4tg43Ptmt+3K/Xl+xw286f+V13x+onvgsF+q1BKInXm+kBHKvjla8CUY0Y5O36/t4R5nuLK1x2RbNfuZPtPuhv7l6uPzN9e3n79hWovrL/uCyz18fdO8fnmJtKPO7eRdNVfPX9frqgVWacPV0TZ460GO6aM8d+9J8LBLxpKfNVSJ1TQv2/s7jvkDHZse2Eu297rKIlinS9RypeVA09aZnQX+x4dqzR+nCv/nP55hamMiwJiknLVkZ1qRYF6XHi0tP9/k7APQm8dlZSsjL87q/MzvLxNIc29KT05Vl8Vzf6cmRu9fE6p5vSU+WNSslrOP7Cn91n5Qa3v6+Itg+6qmP2Pt7KPmFwtc4DjffY4U9VgQz5p3b2f57KOcMV6Bj01953fcHqic+y4V6LYHm3VcFcq+OVrwJRjRjk7frsyamSnJdmIhmP/Mn2v3Q31w9XP7m+vbze0rnrw+69w9PsTaU+V2GNUmyHP3ILiktyWv8jvbcsS/NxyIRT3raXCXa1xTo2HQeOz21niM1D4qm3vQs6C82WC2BLTnwKicAAAAAAAAAAGAaFiYAAAAAAAAAAIBpWJgAAAAAAAAAAACmYWECAAAAAAAAAACYJmHOnDlzQjmwra1Nc+fO1Q033CCLxRLhYoWmyzC0+1CTvjdpgPIyXMtkTU7QlKG5Sv3Pl2+4/45vxKWlyTJtmhIK+nX/e/p0xfNF2MCxobNLR3bsUOpPfqyEgoJee764tLSAYleg6SKp5UiLDEMaNmqwio4v0NiCcZpceIKsiVbTyhAow5BG547xWj7DMDShOFv/b0S+y73VMAwlT5kiyyknKzUzTxP6TfR5fSkJVpc03vL1JVb3/CRrogaML1RymF8QGal83Plrw0hyrnNP9Z9kTVTR2AKl5lg9Xqu//aEzVDA6X8WTByjJmuTye+Dn8HyM53moa9quri7Vfd2g478zVGk5qX7ryZ29byRZk5RkTVT/cYXKzEzpdlw0+7z7GO0J+YcSJ8I5LlzuY9zXmDcMQ4ljxih76iRNGVXUrZzube3ex7ydMxxGl6HDu2o0dUyhZowu8Fh33vqgNTlBkwfnKDfdElYf9dfHuwxDu+p3aUL+RGVMm6GU004L+P7u637mabsZ9xgzztHV1CTDkPrPGKnibw929JVQ7x1dhqF9tn36bvEZyknJ6bY/kHt1tONNMFISrBqXN16WhJSg6+NofyzXhPyJGjTuOMc9yNf1dalL5XW7NH34cZoxojDsMRMJPvuhyzy+n2P+52nsdbR2yDCkzO/MUOp3TjPtcwZvc/1u2w0pccxo5R3fT8WjBupb/Sf5beuUBKvG5Y9TRnq611jrLQ57itv2uk5JStD2miZNPmmIsnx8aW+05o7+52PBzaP8xYVw2GP0lKHZOtR6oNs5IhlPIpFXMHVhj8OT+k12xKBJ/SYr25ITuRhpSEOyhihl3AQlTZyovJH91G/iIKWeelLg909JmeMnacKAqT2mnu2iNUaiwVNZ7XFi+CmDZc1K0YTibJ04LE/W5PgQnmW+4e0e769/+osN1uQEjS1K1YJHHvS7bhBnGIYRdMklNTQ0KCsrS/X19crMzAwlCwAAAAAAAAAA0EcEum7Aq5wAAAAAAAAAAIBpWJgAAAAAAAAAAACmYWECAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAaViYAAAAAAAAAAAApmFhAgAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGlYmAAAAAAAAAAAAKZhYQIAAAAAAAAAAJiGhQkAAAAAAAAAAGAaFiYAAAAAAAAAAIBpWJgAAAA9VmdlpRruf0CdlZUuP/vTvmWrDv34p2rfstWEUsKbmtYaPbH5cT2x+XHVtNbEujh9UlNNsz5fvFGHyqv1+eKNaqppjllZDtva9NA7X+mhd77SYVtbzMrRW/iKaZ72BTKeDtvatHDFDo/172ufJ2UHG/TbJz9T2cGGAK8odOV15brhk+tVXlce9XMhcE01zVr95Oda9eTnQceWcI6Vjo6ButtuV91ttwd03wd6AuY96Cv89eWa1hotKnleNa01Lj/jG9GeFzvP64Kd40WT4/5999yA0rMwAQAAeqzOqirZHnhQnVVVLj/701FWpvY1a9RRVmZCKeFNbWuNXt25TK/uXKZaHlaiorm2ReuXbFbtvnqtX7JZzbUtMSvLYVubFq/eo8Wr9/SIB6OezldM87QvkPF02NamJz7c6XVhwts+T8qrGvXlnlqVVzUGeEWh22vbo63VW7TXtifq50LgmmtbtOnVr7T51a+Cji3hHCsdHQNNCxaqacHCgO77QE/AvAd9hb++XNtaoyWli1TbWuPyM74R7Xmx87wu2DleNNnv381PPxNQehYmAAAAAAAAAACAaViYAAAAAAAAAAAApmFhAgAAAAAAAAAAmCYx1gUAAADwp6uuPtZFAHq0juaOWBcBIeqqq1dndXW3beGwtXSotqm92zYAAIBIaWyP/vdQwbfePr9jYQIAAPR41Rf8PNZFAHq0lfPXxboICFE04ttVz34e8TwBAACc3bzqxlgX4ZjX2+d8vMoJAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAafiOCQAA0OPlLVksie+aALw5+YoT+Z6JXipvyWIljR3jsq1jW0lY8W7eRVM1oijDZduOCluvfw8xAADoOe6YcackvmsiluZdNFVS7/2uCRYmAABAjxefnRXrIgA9WlJqUqyLgBDFZ2cpIS/PZVtnmDEvw5qknLTkbtsAAAAiJT05PdZFOOb19vkdr3ICAAAAAAAAAACmYWECAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAaRLmzJkzJ5QD29raNHfuXN1www2yWCwRLhYAAMBRcWlpskyfrvi0tG9+TvfzRWudXTqyY4dSf/JjJRQUmFNQeGQY0ujcMZpceIKsidZYF6dPSrImqmhsgVJzrBowvlDJMfwSPMMwNKE4W/9vRL5SLYkxK0dv4SumedoXyHiyJidoytBcj/Xva5+7LsPQ7kNN+t6kAcrLiO7zXpdhaJ9tn75bfIZyUnKiei
4Ey1DB6HwVTx4QQmwJ59ij8SR5yhSlnHaa//s+0EMw70Ff4a8vpyRYNaHfRFkTrS4/4xvRnhc7z+uCmeNFm2EY6ho/Xn9fudLvukGcYRhGKCdpaGhQVlaW6uvrlZmZGXJhAQAAAAAAAABA7xfougGvcgIAAAAAAAAAAKZhYQIAAAAAAAAAAJiGhQkAAAAAAAAAAGAaFiYAAAAAAAAAAIBpWJgAAAAAAAAAAACmYWECAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAaViYAAAAAAAAAAAApmFhAgAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGlYmAAAAAAAAAAAAKZJjHUBAAAAAISms7JStvmPSZIyrrhcCYWFMS4RgL6C+AIA8KapplmbXtkmQ9K3zh+rtNzUWBcJvRALEwAAAEAv1VlVpaYFCyVJqT/6Lz44BBAxxBcAgDfNtS3a9OpXkqTjTxvKwgRCwqucAAAAAAAAAACAaViYAAAAAAAAAAAApmFhAgAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGlYmAAAAAAAAAAAAKZhYQIAAAAAAAAAAJiGhQkAAAAAAAAAAGAaFiYAAAAAAAAAAIBpWJgAAAAAAAAAAACmYWECAAAAAAAAAACYhoUJAAAAAAAAAABgGhYmAAAAAAAAAACAaViYAAAAAAAAAAAApkmMdQEAAAAAhCahoEBpv/m142cAiBTiCwDAm9Qcqyb+cLSM//wMhCLOMAwjlAMbGhqUlZWl+vp6ZWZmRrpcAAAAAAAAAACgFwl03YBXOQEAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADANCxMAAAAAAAAAAMA0LEwAAAAAAAAAAADTsDABAAAAAAAAAABMw8IEAAAAAAAAAAAwDQsTAAAAAAAAAADANCxMAAAAAAAAAAAA07AwAQAAAAAAAAAATMPCBAAAAAAAAAAAMA0LEwAAAAAAAAAAwDQsTAAAAAAAAAAAANOwMAEAAAAAAAAAAEzDwgQAAAAAAAAAADANCxMAAAAAAAAAAMA0LEwAAAAAAAAAAADTsDABAAAAAAAAAABMw8IEAAAAAAAAAAAwDQsTAAAAAAAAAADANImhHmgYhiSpoaEhYoUBAAAAAAAAAAC9k329wL5+4E3ICxM2m02SVFxcHGoWAAAAAAAAAACgj7HZbMrKyvK6P87wt3ThRVdXlw4cOKCMjAzFxcWFXEAAiKSGhgYVFxdr3759yszMjHVxAMCB+ASgJyI2AeipiE8AeiJik3+GYchms2nAgAGKj/f+TRIh/4+J+Ph4HXfccaEeDgBRlZmZyQ0CQI9EfALQExGbAPRUxCcAPRGxyTdf/1PCji+/BgAAAAAAAAAApmFhAgAAAAAAAAAAmCZhzpw5c2JdCACIpISEBJ1++ulKTAz5bXUAEBXEJwA9EbEJQE9FfALQExGbIiPkL78GAAAAAAAAAAAIFq9yAgAAAAAAAAAApmFhAgAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGlYmADQ491999068cQTlZGRoYKCAp1//vkqLS11SdPa2qrZs2crLy9P6enp+vGPf6zKykqXNHv37tXMmTOVmpqqgoICXXfddTpy5IiZlwKgD5s7d67i4uJ07bXXOrYRmwDEyv79+/XLX/5SeXl5slqtmjBhgj7//HPHfsMwdMstt6h///6yWq0688wztX37dpc8ampqNGvWLGVmZio7O1u/+tWv1NjYaPalAOhDOjs7dfPNN2vo0KGyWq0aPny47rjjDhmG4UhDfAIQbR9//LG+//3va8CAAYqLi9Mrr7zisj9ScWjTpk065ZRTlJKSouLiYt1zzz1Rv7behIUJAD3eRx99pNmzZ2vNmjVavny5Ojo6dPbZZ6upqcmR5ne/+51ef/11vfjii/roo4904MAB/ehHP3Ls7+zs1MyZM9Xe3q5Vq1bpmWee0dNPP61bbrklFpcEoI9Zt26dHnvsMU2cONFlO7EJQCzU1tbqpJNOUlJSkt5++21t27ZN999/v3Jychxp7rnnHj388MOaP3++1q5dq7S0NJ1zzjlqbW11pJk1a5a2bt2q5cuX64033tDHH3+s3/zmN7G4JAB9xN/+9jc9+uijeuSRR1RSUqK//e1vuueeezRv3jxHGuITgGhramrSt771Lf3jH//wuD8ScaihoUFnn322Bg8erPXr1+vee+/VnDlztGDBgqhfX69hAEAvU1VVZUgyPvroI8MwDKOurs5ISkoyXnzxRUeakpISQ5KxevVqwzAM46233jLi4+ONiooKR5pHH33UyMzMNNra2sy9AAB9is1mM44//nhj+fLlxmmnnWZcc801hmEQmwDEzvXXX2+cfPLJXvd3dXUZRUVFxr333uvYVldXZ1gsFmPx4sWGYRjGtm3bDEnGunXrHGnefvttIy4uzti/f3/0Cg+gT5s5c6Zx2WWXuWz70Y9+ZMyaNcswDOITAPNJMpYtW+b4PVJx6J///KeRk5Pj8lx3/fXXG6NGjYr2JfUa/I8JAL1OfX29JCk3N1eStH79enV0dOjMM890pBk9erQGDRqk1atXS5JWr16tCRMmqLCw0JHmnHPOUUNDg7Zu3Wpi6QH0NbNnz9bMmTNdYpBEbAIQO6+99pqmTp2qn/70pyooKNDkyZO1cOFCx/5du3apoqLCJT5lZWVp2rRpLvEpOztbU6dOdaQ588wzFR8fr7Vr15p3MQD6lBkzZuj9999XWVmZJGnjxo1auXKlzjvvPEnEJwCxF6k4tHr1ap166qlKTk52pDnnnHNUWlqq2tpak66mZ0uMdQEAIBhdXV269tprddJJJ2n8+PGSpIqKCiUnJys7O9slbWFhoSoqKhxpnD/4s++37wOAUCxZskRffPGF1q1b120fsQlArJSXl+vRRx/V73//e/3lL3/RunXrdPXVVys5OVkXX3yxI754ij/O8amgoMBlf2JionJzc4lPAEL25z//WQ0NDRo9erQSEhLU2dmpO++8U7NmzZIk4hOAmItUHKqoqNDQoUO75WHf5/yKzWMVCxMAepXZs2dry5YtWrlyZayLAuAYt2/fPl1zzTVavny5UlJSYl0cAHDo6urS1KlTddddd0mSJk+erC1btmj+/Pm6+OKLY1w6AMeyF154Qc8//7wWLVqkcePGacOGDbr22ms1YMAA4hMAHGN4lROAXuPKK6/UG2+8oRUrVui4445zbC8qKlJ7e7vq6upc0ldWVqqoqMiRprKystt++z4ACNb69etVVVWlE044QYmJiUpMTNRHH32khx9+WImJiSosLCQ2AYiJ/v37a+zYsS7bxowZo71790r6Jr54ij/O8amqqspl/5EjR1RTU0N8AhCy6667Tn/+8591wQUXaMKECbrwwgv1u9/9Tnfffbck4hOA2ItUHOJZzz8WJgD0eIZh6Morr9SyZcv0wQcfdPuvcFOmTFFSUpLef/99x7bS0lLt3btX06dPlyRNnz5dm
zdvdrlxLF++XJmZmd0e3AEgEGeccYY2b96sDRs2OP5MnTpVs2bNcvxMbAIQCyeddJJKS0tdtpWVlWnw4MGSpKFDh6qoqMglPjU0NGjt2rUu8amurk7r1693pPnggw/U1dWladOmmXAVAPqi5uZmxce7fhSVkJCgrq4uScQnALEXqTg0ffp0ffzxx+ro6HCkWb58uUaNGsVrnP4jYc6cOXNiXQgA8GX27Nl6/vnn9dJLL2nAgAFqbGxUY2OjEhISlJSUpJSUFB04cECPPPKIJk2apJqaGl1++eUqLi7WrbfeKkkaNmyYli5dqvfee08TJ07Uxo0bddVVV+mKK67QOeecE+MrBNAbWSwWFRQUuPxZtGiRhg0bposuuojYBCBmBg0apNtuu02JiYnq37+/3nnnHc2ZM0d33HGHJk6cqLi4OHV2duquu+7S2LFj1d7erquvvlrNzc2aN2+eEhMT1a9fP61du1aLFy/W5MmTtXv3bl1++eU6++yzdckll8T6EgH0UiUlJXrmmWc0atQoJScna8WKFfrLX/6iX/ziFzrrrLOITwBM0djYqG3btqmiokKPPfaYpk2bJqvVqvb2dmVnZ0ckDo0cOVKPPvqotm7dqpEjRzri3W233aYpU6bEtgJ6CgMAejhJHv889dRTjjQtLS3G//7v/xo5OTlGamqq8V//9V/GwYMHXfLZvXu3cd555xlWq9XIz883/vCHPxgdQbkO7AAABAxJREFUHR0mXw2Avuy0004zrrnmGsfvxCYAsfL6668b48ePNywWizF69GhjwYIFLvu7urqMm2++2SgsLDQsFotxxhlnGKWlpS5pqqurjZ///OdGenq6kZmZaVx66aWGzWYz8zIA9DENDQ3GNddcYwwaNMhISUkxhg0bZtx4441GW1ubIw3xCUC0rVixwuPnTBdffLFhGJGLQxs3bjROPvlkw2KxGAMHDjTmzp1r1iX2CnGGYRgxWhMBAAAAAAAAAADHGL5jAgAAAAAAAAAAmIaFCQAAAAAAAAAAYBoWJgAAAAAAAAAAgGlYmAAAAAAAAAAAAKZhYQIAAAAAAAAAAJiGhQkAAAAAAAAAAGAaFiYAAAAAAAAAAIBpWJgAAAAA4Ncll1yi888/P9bFAAAAANAHJMa6AAAAAABiKy4uzuf+W2+9VQ899JAMwzCpRAAAAAD6MhYmAAAAgGPcwYMHHT//+9//1i233KLS0lLHtvT0dKWnp8eiaAAAAAD6IF7lBAAAABzjioqKHH+ysrIUFxfnsi09Pb3bq5xOP/10XXXVVbr22muVk5OjwsJCLVy4UE1NTbr00kuVkZGhESNG6O2333Y515YtW3TeeecpPT1dhYWFuvDCC3X48GGzLxkAAABADLEwAQAAACAkzzzzjPLz8/XZZ5/pqquu0m9/+1v99Kc/1YwZM/TFF1/o7LPP1oUXXqjm5mZJUl1dnb773e9q8uTJ+vzzz/XOO++osrJSP/vZz2J8JQAA/P/27h+lmSCO4/B3/QOWgoLEKk0kTZR4CLscwDJFKhvbQEgZsE/tDXKAVKmsrLQU9gKCVmnX2AnyvpXgKPg81e4Uy2/L4TMwAJQkTAAAAF9ydnaWyWSSTqeT8Xicvb29HB4eZjQapdPpZDqd5uXlJY+Pj0mS+Xyefr+f2WyWbrebfr+f29vbrFarPD09/fDfAAAApbhjAgAA+JLT09OP5+3t7RwcHKTX632sHR0dJUmen5+TJA8PD1mtVv+9r6Ku65ycnHzzxAAAwG8gTAAAAF+yu7v76b2qqk9rVVUlSd7e3pIk6/U6g8EgNzc3/3yr1Wp946QAAMBvIkwAAABFnJ+fZ7FYpN1uZ2fHVgQAAP4qd0wAAABFXF1d5fX1NZeXl7m/v09d11kulxkOh2ma5qfHAwAAChEmAACAIo6Pj3N3d5emaXJxcZFer5fr6+vs7+9na8vWBAAA/opqs9lsfnoIAAAAAADgb3AsCQAAAAAAKEaYAAAAAAAAihEmAAAAAACAYoQJAAAAAACgGGECAAAAAAAoRpgAAAAAAACKESYAAAAAAIBihAkAAAAAAKAYYQIAAAAAAChGmAAAAAAAAIoRJgAAAAAAgGKECQAAAAAAoJh3bv8p1u6sZCgAAAAASUVORK5CYII=", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAl9UlEQVR4nO3de3RV5Z0//k8gEEJIjpAYAnJzitzECpWZqnTVajtKRwutq/WCIkjrZU3VTlupdrqsOi67rFOko1ZbrTq9UGnroF+trbd6RSq2CK1WRlFBRblowBAEApL9+4OfZ4xccjsnJzu8XmtlLbL3s5/97PPs8znn8M7ZuyhJkiQAAAAAAABSrFuhBwAAAAAAANBeAg8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2Bx27MmDEjioqKdvl56aWX9rhu0qRJ2e2HDRu22zZXXXVVts1rr70Wn/vc56KsrCyqqqriggsuiG3btmXXb926NWbMmBGHHHJIFBcXx+c///ldxrlgwYKYOHFiVFZWRmlpaYwaNSrmzJmT18cm7dIytxERDQ0N8Z3vfCeGDh0aJSUl8ZGPfCRuvfXWvD02aff+/J177rm7rPvXf/3XKCoqihkzZjRp21nm+n1PPvlkFBcXx7hx49r1WHR1uZ7riIiFCxfGv/zLv0Tfvn2jV69eccghh8Ts2bNjx44dTdpt2LAhpk2bFplMJjKZTEybNi3eeeedJm2+9rWvxWGHHRYlJSW7ncuVK1fudkz33Xdfux4XAAAAgH1dcSF2uuHdbc03yqG+ZT1bvc2kSZPitttua7Js//333+O6kpKSJr//x3/8R5x11llNlpWXl0dExI4dO+L444+P/fffPxYsWBC1tbUxffr0SJIkrrvuumyb0tLSuOCCC+J//ud/djvGsrKyOO+88+KjH/1olJWVxYIFC+Kcc86JsrKyOPvss1t9zLlQ11DXYfvKlGTatF0a5jYi4qSTToq1a9fGLbfcEsOHD49169bFe++916Zjbq8tdVs7dH+lmV5t2m7w4MExb968mDNnTpSWlkbEztDh9ttvjyFDhjRp25nmOiKirq4uzjjjjPj0pz8da9eubf3B58iO2toO3V/3yso2bZfLub7zzjvjpJNOijPPPDMeeeSR2G+//eKhhx6Kb33rW/HUU0/Fb37zmygqKoqIiKlTp8aqVauy4cTZZ58d06ZNi3vuuSfbX5IkMXPmzFi0aFH87W9/2+MxPPTQQ3HwwQdnf+/Xr1+bHgsAAAAAdipI4PHZqx/p0P09dflxrd6mpKQkampqWr3ufeXl5Xts88ADD8Tzzz8fr7/+egwcODAiImbPnh0zZsyIK6+8MioqKqKsrCxuvPHGiNj5V98f/gviiIjx48fH+PHjs78PGzYs5s+fH0888UTBAo9pf5jaYfu6+/P3tmm7NMztfffdF4899li88sor2f8EHTZsWAuPMPd+fsYdHbq/c/7f6W3a7mMf+1i88sorMX/+/DjttNMiImL+/PkxePDg+Id/+IcmbTvLXL/vnHPOialTp0b37t3jrrvuauER596aj47r0P0d8MbrbdouV3P97rvvxllnnRWTJ0+Om266Kbv8K1/5SvTv3z8mT54cv/nNb+Lkk0+OZcuWxX333RdPPfVUfPzjH4+IiJtvvjmOOOKIeOGFF2LkyJEREXHttddGRMRbb72118CjsrKy2XMQAAAAgJZzSasC+NOf/hRjx47N/idpRMRxxx0XDQ0NsXjx4jb3u2TJkli4cGEcddRRuRgmbZCrub377rtjwoQJcfXVV8cBBxwQI0aMiAsvvDC2bNmSj2F3KWeeeWaTv+a/9dZbY+bMmTnfTy6fx7fddlu8/PLLcemll+Z6mF1aLub6gQceiNra2rjwwgt3Wfe5z30uRowYEbfffntE7JzzTCaTDTsiIg4//PDIZDKxcOHCVo9/8uTJUV1dHRMnTow77ujYUBEAAACgKxJ47MHvfve76NOnT/bnS1/60h7X9enTJ6644oom21900UW7tHn00UcjImLNmjXRv3//Ju379u0bPXv2jDVr1rR6rIMGDYqSkpKYMGFCfPWrX42vfOUrrT/gfUga5vaVV16JBQsWxHPPPRd33nln/PCHP4w77rgjvvrVr7b9wPcR06ZNiwULFsTKlSvj1VdfjSeffDJOP33Xb4x0lrlevnx5XHzxxTF37twoLi7Il+5SKxdz/eKLL0ZExOjRo3e7j1GjRmXbrFmzJqqrq3dpU11d3ao579OnT1xzzTVxxx13xO9///v49Kc/HSeffHL88pe/bHEfAAAAAOzK/67twdFHH529FE3Ezvtl7GldxK7XXp81a1b2prnvO+CAA7L/fv968B+UJMlulzfniSeeiE2bNsVTTz0VF198cQwfPjxOPfXUVvezr0jD3DY2NkZRUVHMnTs3Mpmd9yq55ppr4otf/GL86Ec/yt6zgF1VVVXF8ccfHz/72c8iSZI4/vjjo6qqapd2nWGud+zYEVOnTo3LL788RowY0aJt+D+5nOskSXa7jw/PZy6e31VVVfH1r389+/uECRNiw4YNcfXVV+82sAEAAACgZQoSePzhW0cXYretUlZWFsOHD2/1uvdVVVXtsU1NTU0sWrSoybINGzbE9u3bd/mL8ZY48MADIyLikEMOibVr18Zll11WsMDjF5/9VUH22xppmNsBAwbEAQcckA07Inb+BXqSJLFq1ao46KCDWtxXLpzx8y926P7aa+bMmXHeeedFRMSPfvSj3bbpDHNdX18ff/nLX2LJkiXZ8TY2NkaSJFFcXBwPPPBAHHPMMS3qK1dq/ra0Q/fXXu2d6/eDpmXLlsWRRx65y/r//d//jTFjxkTEzjnf3Q3l33rrrTbV7g86/PDD46c//Wm7+gAAAADY1xUk8Ohb1rMQu+00jjjiiLjyyitj9erVMWDAgIjYeR35kpKSOOyww9rVd5Ik0dDQkIthtkmmJNN8oy4sV3M7ceLE+O1vfxubNm2KPn36RMTOS+9069YtBg0alJex701ppleH77M9Jk2aFNu2bYuInffVyIdczHVFRUU8++yzTZbdcMMN8fDDD8cdd9yRDTM7UvfKyg7fZ3u0d66PPfbY6NevX8yePXuXwOPuu++O5cuXZy9/dcQRR0RdXV08/fTT8U//9E8REbFo0aKoq6vbbVjSGkuWLMmeRwAAAAC0jUtatUFDQ8Mu12svLi5ucimV+vr6Xdr07t07Kioq4thjj40xY8bEtGnT4j//8z9j/fr1ceGFF8ZZZ50VFRUV2fbPP/98bNu2LdavXx/19fWxdOnSiIgYN25cROz8a+YhQ4bEqFGjIiJiwYIF8YMf/CDOP//8PBz1vqGzzO3UqVPjiiuuiDPPPDMuv/zyePvtt2PWrFkxc+
ZMl7Nqge7du8eyZcuy/96dzjDX3bp1i7Fjxzbpv7q6Onr16rXLcnavvXNdVlYWP/nJT+KUU06Js88+O84777yoqKiIP/7xjzFr1qz44he/GCeddFJE7PyW1aRJk+Kss86Kn/zkJxERcfbZZ8cJJ5wQI0eOzPb90ksvxaZNm2LNmjWxZcuW7JyPGTMmevbsGT/72c+iR48eMX78+OjWrVvcc889ce2118b3v//9XD88AAAAAPuWhF1Mnz49mTJlyh7XRcQuPyNHjsy2GTp06G7bnHPOOdk2r776anL88ccnpaWlSb9+/ZLzzjsv2bp1a5N97amf91177bXJwQcfnPTu3TupqKhIxo8fn9xwww3Jjh07cvuAdCFpmdskSZJly5Yln/nMZ5LS0tJk0KBByTe+8Y1k8+bNuXswupi9zW2SJMmUKVOS6dOnZ9t2prn+oEsvvTQ59NBD2/QY7CtyPddJkiSPP/54MmnSpCSTySQ9e/ZMxowZk/zgBz9I3nvvvSbtamtrk9NOOy0pLy9PysvLk9NOOy3ZsGFDkzZHHXXUbve5YsWKJEmS5L//+7+T0aNHJ717907Ky8uTww47LPnFL37R3ocFAAAAYJ9XlCR7uFMrAAAAAABASnQr9AAAAAAAAADaS+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6hXnq+PGxsZ48803o7y8PIqKivK1GwAAAAAAIAWSJIn6+voYOHBgdOuW++9j5C3wePPNN2Pw4MH56h4AAAAAAEih119/PQYNGpTzfvMWeJSXl0fEzoFXVFTkazcAAAAAAEAKbNy4MQYPHpzND3Itb4HH+5exqqioEHgAAAAAAAAREXm7DYablgMAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEi9vAce767fnO9dQJe0avUb8YPfzo5Vq98o9FDoYtZvXR+/WjY31m9dHxE76/Rfbv9rTut1PvpMsx1r18bG2dfEjrVrCz2UnOkKx5Sr8zRN5/uHn//tbQe5lo9z74N9vl3fEDc/8lK8Xd+Qs/7Zt3XkOaU2A+RPV6mxXeU4IrrWsdCx8h54bHlna753AV3Smto18XiPh2NN7ZpCD4UuZsPW9THvhV/Fhv//TcPmDVti8bxnY/OGLTnbRz76TLMd69ZF/TVzYse6dYUeSs50hWPK1XmapvP9w8//9raDXMvHuffBPt+ub4hbHn1Z4EHOdOQ5pTYD5E9XqbFd5Tgiutax0LFc0goAAAAAAEg9gQcAAAAAAJB6xfneQcO722JLnctaQWtt27w9IiLefe/dqGuoK/Bo6Eo2bdu02+UNm3JXrxs2bctJP11N4zt1saO2ttDDyInGd7pOXWrvuZ/G833Ttk17fW3ZU52AjtLcOdravj6sfsv22PBu+p67dD71W7Z3+D5z+fwAYKeu9v63K7xWdLU5oePkPfC4/8rHorRHab53A11OXeX6iMkRs1d+P2JloUfDvuDe7/6x0EPo8mpPObXQQ2A39sVz/5KF3yn0EGCv8n2Onv/zv+S1f8gnNRyA5nitYF/mklYAAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpl/d7eBz3naNi2Ngh+d4NdDlLXlgaC968P7457KIYN/rQQg+HLmRl3YrdXs/z+P/4dFQO65uTfdSu3LBP3hehOZXzbo8eY0YXehg5sf35ZV3mniTtPffTeL5fceSVMSxz4B7X76lOQEdp7hxtjd2dz9edMSGG15TnpH/2bS+tqe/we8Lk8vkBwE5d7f1vV3it6GpzQsfJe+BRUtYzSjO98r0b6HJ69u4RERFlxWWRKckUeDR0JX169tnt8pI+uavXJX165qSfrqbbfpnoXllZ6GHkxI79uk5dau+5n8bzvU/PPnt9bdlTnYCO0tw52tq+Pqy8tEf0LUvfc5fOp7y0R4fvM5fPDwB26mrvf7vCa0VXmxM6jktaAQAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASL28Bx6l+7lhObRFTWVNfHL7MVFTWVPoodDF9O3VL04ZOTX69uoXERG9+5bGYaccEr37luZsH/noM826V1dH+Te+Ht2rqws9lJzpCseUq/M0Tef7h5//7W0HuZaPc++DfVaVl8SXP/WRqCovyVn/7Ns68pxSmwHyp6vU2K5yHBFd61joWEVJkiT56Hjjxo2RyWSirq4uKioq8rELAAAAAAAgJfKdG7ikFQAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfw6ALWb10fv1o2N9ZvXd8p+sm33Y2zvWN/u74hbn7kpXi7viEn7XIlX/vr6OMg/8xpunWW+ess4wA6H/WBQmnJ+/y2fBbYsXZtbJx9TexYu7ZN42rv9sBOrXl98VrUsQrxeJvj3POY0hnV5vl8FHh0ARu2ro95L/wqNrQzqMhVP/m2u3G2d+xv1zfELY++3KLAoyXtciVf++vo4yD/zGm6dZb56yzjADof9YFCacn7/LZ8Ftixbl3UXzMndqxb16ZxtXd7YKfWvL54LepYhXi8zXHueUzpjGo3CTwAAAAAAAD2SuABAAAAAACknsADAAAAAABIveJCD4Dc2bRtU9Q11LVr+zT54PHmauz1W7bHhne37XV9ITQ3rrb0R9eU63OFjtHZnpPOI+DDOludYt+zt8867fks0PhOXeyorW3TdkDutOT9p9eiwujIzwbmOH98xqMzqd/yXl77F3h0IZcs/E6hh9Ch8nG85//8LznvMxc667jofJwr5ILzCIDOJl+fdWpPOTUv/QKt4/1n52VuugbzSGfyXsO7ee3fJa0AAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABSzz08upArjrwyhmUObPP2K+tWpOo+IB883lyN/bozJsTwmvI9rn9pTX1BrnvY3Lhaq1DHQf7l+lyhY3S256TzCPiwzlan2Pfs7bNOez4LVM67PXqMGd3q7bY/v8z9PyCHWvL+02tRYXTkZwNznD8+49GZLF3+Zhzz/fz1L
/DoQvr07BOZkky7tk+TDx5vrsZeXtoj+pb13Ov6QmhuXG3pj64p1+cKHaOzPSedR8CHdbY6xb5nb5912vNZoNt+meheWdnq7Xbs1/bPXcCuWvL+02tRYXTkZwNznD8+49GZlJfmN5JwSSsAAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3h0AX179YtTRk6Nvr36dYp+8m1342zv2KvKS+LLn/pIVJWX5KRdruRrfx19HOSfOU23zjJ/nWUcQOejPlAoLXmf35bPAt2rq6P8G1+P7tXVbRpXe7cHdmrN64vXoo5ViMfbHOeex5TOqLJPfs/HoiRJknx0vHHjxshkMlFXVxcVFRX52AUAAAAAAJAS+c4NfMMDAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/DYh7xd3xA3P/JSvF3f0Kb1AEDE+q3r41fL5sb6retbtBwAAADoGAKPfcjb9Q1xy6Mv7zXw2Nt6ACBiw9b1Me+FX8WGDwUbe1oOAAAAdAyBBwAAAAAAkHoCDwAAAAAAIPWKCz0AOl79lu2x4d1tu10OALTMpm2boq6hrsnvAAAAQOEIPPZB5//8L4UeAgCk3iULv1PoIQAAAAAf4JJWAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqeceHvug686YEMNryndZ/tKaevf3AIAWuuLIK2NY5sDs7yvrVrivBwAAABSQwGMfVF7aI/qW9dztcgCgZfr07BOZkkyT3wEAAIDCcUkrAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfw2IdUlZfElz/1kagqL2nTegAgom+vfnHKyKnRt1e/Fi0HAAAAOkZRkiRJPjreuHFjZDKZqKuri4qKinzsAgAAAAAASIl85wa+4QEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1ivPVcZIkERGxcePGfO0CAAAAAABIiffzgvfzg1zLW+BRW1sbERGDBw/O1y4AAAAAAICUqa2tjUwmk/N+8xZ49OvXLyIiXnvttbwMHEiHjRs3xuDBg+P111+PioqKQg8HKAB1AFAHgAi1AFAHgIi6uroYMmRINj/ItbwFHt267bw9SCaTUcCAqKioUAtgH6cOAOoAEKEWAOoA8H/5Qc77zUuvAAAAAAAAHUjgAQAAAAAApF7eAo+SkpK49NJLo6SkJF+7AFJALQDUAUAdACLUAkAdAPJfB4qSJEny0jMAAAAAAEAHcUkrAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqdfqwOONN96I008/PSorK6N3794xbty4WLx4cXZ9kiRx2WWXxcCBA6O0tDQ+9alPxd///vcmfTQ0NMT5558fVVVVUVZWFpMnT45Vq1a1/2iADtFcHZg/f34cd9xxUVVVFUVFRbF06dJd+lAHIP32Vgu2b98eF110URxyyCFRVlYWAwcOjDPOOCPefPPNJn2oBZBuzb0nuOyyy2LUqFFRVlYWffv2jc985jOxaNGiJn2oA5BuzdWBDzrnnHOiqKgofvjDHzZZrg5A+jVXC2bMmBFFRUVNfg4//PAmfagFkG4teU+wbNmymDx5cmQymSgvL4/DDz88Xnvttez6XNSBVgUeGzZsiIkTJ0aPHj3iD3/4Qzz//PMxe/bs2G+//bJtrr766rjmmmvi+uuvjz//+c9RU1MT//zP/xz19fXZNv/2b/8Wd955Z8ybNy8WLFgQmzZtihNOOCF27NjRqsEDHa8ldeDdd9+NiRMnxlVXXbXHftQBSLfmasHmzZvjmWeeiUsuuSSeeeaZmD9/frz44osxefLkJv2oBZBeLXlPMGLEiLj++uvj2WefjQULFsSwYcPi2GOPjbfeeivbRh2A9GpJHXjfXXfdFYsWLYqBAwfusk4dgHRraS2YNGlSrF69Ovvz+9//vsl6tQDSqyV14OWXX45PfOITMWrUqHj00Ufjr3/9a1xyySXRq1evbJuc1IGkFS666KLkE5/4xB7XNzY2JjU1NclVV12VXbZ169Ykk8kkP/7xj5MkSZJ33nkn6dGjRzJv3rxsmzfeeCPp1q1bct9997VmOEABNFcHPmjFihVJRCRLlixpslwdgPRrTS1439NPP51ERPLqq68mSaIWQNq1pQ7U1dUlEZE89NBDSZKoA5B2La0Dq1atSg444IDkueeeS4YOHZrMmTMnu04dgPRrSS2YPn16MmXKlD2uVwsg3VpSB04++eTk9NNP3+P6XNWBVn3D4+67744JEybEl770paiuro7x48fHzTffnF2/YsWKWLNmTRx77LHZZSUlJXHUUUfFwoULIyJi8eLFsX379iZtBg4cGGPHjs22ATqv5upAS6gDkH5tqQV1dXVRVFSU/QsPtQDSrbV1YNu2bXHTTTdFJpOJQw89NCLUAUi7ltSBxsbGmDZtWsyaNSsOPvjgXfpQByD9Wvqe4NFHH43q6uoYMWJEnHXWWbFu3brsOrUA0q25OtDY2Bj33ntvjBgxIo477riorq6Oj3/843HXXXdl2+SqDrQq8HjllVfixhtvjIMOOijuv//+OPfcc+OCCy6In//85xERsWbNmoiI6N+/f5Pt+vfvn123Zs2a6NmzZ/Tt23ePbYDOq7k60BLqAKRfa2vB1q1b4+KLL46pU6dGRUVFRKgFkHYtrQO/+93vok+fPtGrV6+YM2dOPPjgg1FVVRUR6gCkXUvqwPe///0oLi6OCy64YLd9qAOQfi2pBZ/97Gdj7ty58fDDD8fs2bPjz3/+cxxzzDHR0NAQEWoBpF1zdWDdunWxadOmuOqqq2LSpEnxwAMPxBe+8IU48cQT47HHHouI3NWB4tYMvLGxMSZMmBDf+973IiJi/Pjx8fe//z1uvPHGOOOMM7LtioqKmmyXJMkuyz6sJW2AwmtpHWgLdQDSozW1YPv27XHKKadEY2Nj3HDDDc32rRZAOrS0Dhx99NGxdOnSePvtt+Pmm2+Ok046KRYtWhTV1dV77FsdgHRorg4sXrw4/uu/
/iueeeaZVj+n1QFIj5a8Jzj55JOz7ceOHRsTJkyIoUOHxr333hsnnnjiHvtWCyAdmqsDjY2NERExZcqU+PrXvx4REePGjYuFCxfGj3/84zjqqKP22Hdr60CrvuExYMCAGDNmTJNlo0ePzt5JvaamJiJil8Rl3bp12W991NTUxLZt22LDhg17bAN0Xs3VgZZQByD9WloLtm/fHieddFKsWLEiHnzwwey3OyLUAki7ltaBsrKyGD58eBx++OFxyy23RHFxcdxyyy0RoQ5A2jVXB5544olYt25dDBkyJIqLi6O4uDheffXV+OY3vxnDhg2LCHUAuoK2/D/BgAEDYujQobF8+fKIUAsg7ZqrA1VVVVFcXNxstpCLOtCqwGPixInxwgsvNFn24osvxtChQyMi4sADD4yampp48MEHs+u3bdsWjz32WBx55JEREXHYYYdFjx49mrRZvXp1PPfcc9k2QOfVXB1oCXUA0q8lteD9sGP58uXx0EMPRWVlZZP2agGkW1vfEyRJkr18hToA6dZcHZg2bVr87W9/i6VLl2Z/Bg4cGLNmzYr7778/ItQB6Ara8p6gtrY2Xn/99RgwYEBEqAWQds3VgZ49e8Y//uM/7rVNzupAi29vniTJ008/nRQXFydXXnllsnz58mTu3LlJ7969k1/+8pfZNldddVWSyWSS+fPnJ88++2xy6qmnJgMGDEg2btyYbXPuuecmgwYNSh566KHkmWeeSY455pjk0EMPTd57773WDAcogJbUgdra2mTJkiXJvffem0REMm/evGTJkiXJ6tWrs23UAUi35mrB9u3bk8mTJyeDBg1Kli5dmqxevTr709DQkO1HLYD0aq4ObNq0Kfn2t7+d/OlPf0pWrlyZLF68OPnyl7+clJSUJM8991y2H3UA0qslnw0+bOjQocmcOXOaLFMHIN2aqwX19fXJN7/5zWThwoXJihUrkkceeSQ54ogjkgMOOMD/F0IX0ZL3BPPnz0969OiR3HTTTcny5cuT6667LunevXvyxBNPZNvkog60KvBIkiS55557krFjxyYlJSXJqFGjkptuuqnJ+sbGxuTSSy9NampqkpKSkuSTn/xk8uyzzzZps2XLluS8885L+vXrl5SWliYnnHBC8tprr7V2KECBNFcHbrvttiQidvm59NJLs23UAUi/vdWCFStW7LYORETyyCOPZNupBZBue6sDW7ZsSb7whS8kAwcOTHr27JkMGDAgmTx5cvL000836UMdgHRr7rPBh+0u8FAHIP32Vgs2b96cHHvsscn++++f9OjRIxkyZEgyffr0XZ7nagGkW0veE9xyyy3J8OHDk169eiWHHnpoctdddzVZn4s6UJQkSdKKb6cAAAAAAAB0Oq26hwcAAAAAAEBnJPAAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AACAdrvsssti3LhxhR4GAACwDytKkiQp9CAAAIDOq6ioaK/rp0+fHtdff300NDREZWVlB40KAACgKYEHAACwV2vWrMn++9e//nV897vfjRdeeCG7rLS0NDKZTCGGBgAAkOWSVgAAwF7V1NRkfzKZTBQVFe2y7MOXtJoxY0Z8/vOfj+9973vRv3//2G+//eLyyy+P9957L2bNmhX9+vWLQYMGxa233tpkX2+88UacfPLJ0bdv36isrIwpU6bEypUrO/aAAQCAVBJ4AAAAefHwww/Hm2++GY8//nhcc801cdlll8UJJ5wQffv2jUWLFsW5554b5557brz++usREbF58+Y4+uijo0+fPvH444/HggULok+fPjFp0qTYtm1bgY8GAADo7AQeAABAXvTr1y+uvfbaGDlyZMycOTNGjhwZmzdvjn//93+Pgw46KL797W9Hz54948knn4yIiHnz5kW3bt3ipz/9aRxyyCExevTouO222+K1116LRx99tLAHAwAAdHrFhR4AAADQNR188MHRrdv//Y1V//79Y+zYsdnfu3fvHpWVlbFu3bqIiFi8eHG89NJLUV5e3qSfrVu3xssvv9wxgwYAAFJL4AEAAORFjx49mvxeVFS022WNjY0REdHY2BiHHXZYzJ07d5e+9t9///wNFAAA6BIEHgAAQKfwsY99LH79619HdXV1VFRUFHo4AABAyriHBwAA0CmcdtppUVVVFVOmTIknnngiVqxYEY899lh87Wtfi1WrVhV6eAAAQCcn8AAAADqF3r17x+OPPx5DhgyJE088MUaPHh0zZ86MLVu2+MYHAADQrKIkSZJCDwIAAAAAAKA9fMMDAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOr9fw+gShyFf/1LAAAAAElFTkSuQmCC\n", "text/plain": [ - "" + "" ] }, - "execution_count": 3, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -134,7 +129,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "p_R9T9Y5Ynp9" @@ -145,7 +139,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": { "colab": { "base_uri": "https://localhost:8080/", @@ -157,9 +151,9 @@ "outputs": [ { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nO3de3RV5Zk/8CeQCzEhCRcDwXJTRLygoHQs6kztVAHrEmtntFJEpY5WB7V2quO4vNE6WmlrqaOzdGS0Y50q01bbsVbrXSvC4JVaKwuRCrZKoCYSAkJAsn9/8OPUlEsSck5Odvh81spaZO/3vPvd593nOfucL9m7IEmSJAAAAAAAAFKsR74HAAAAAAAA0FECDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYHHDpx99tlRUFCw3c9bb72103WTJk3KPH7YsGE7bHPjjTdm2rzzzjtx4oknxl577RXV1dVx2WWXxUcffZRZv3LlyvjSl74UI0eOjB49esQll1yy3TgfeOCBGDduXFRVVUVZWVmMGTMm7rnnntw+OSmXlrmNiFizZk3MmDEjampqoqSkJEaOHBkPP/xw7p6cFNs2d+eff/5262bMmBEFBQVx9tlnt2jbVeZ5m7lz50ZBQUF8/vOf7+Cz0b1le64jIubPnx+f+9znok+fPtGrV68YPXp0fO9734stW7a0aFdfXx9Tp06NioqKqKqqinPOOSfWrVuXWb9x48Y4++yzY/To0VFYWLjDuXzmmWd2OKba2tosPDsAAAAAe7bCfGz0g/WbOnV7fcqK2/2YSZMmxQ9+8IMWy/bee++drispKWnx+ze/+c0499xzWyzr3bt3RERs2bIlTjzxxBg4cGDMnz8/Vq5cGWeeeWYUFRXFDTfcEBERTU1Nsffee8dVV10Vs2fP3uEY+/btG1deeWWMGjUqiouL46GHHorp06dHdXV1TJw4sd373FENTQ2dur3Kksrdelwa5nbTpk1x/PHHR3V1dfz0pz+NffbZJ1asWBFVVVW7tc8dtaFhY6dur7SyV7sfM3jw4Jg7d27Mnj07SktLI2LrF9D33ntvDBkypEXbrjLP2yxfvjwuvfTS+Ou//ut273c2bamr69Tt9ezXb7cel825/tnPfhannXZaTJ8+PZ5++umoqqqKJ554Iv75n/85FixYED/+8Y+joKAgIiKmTp0aK1eujMcffzw2b94c06dPj/POOy/uvffeiNh6XJSWlsbFF18c999//y73YcmSJVFRUZH5vbq6ereeCwAAAAD+LC+BxwnffrpTt/d/32j/l/8lJSUxcODAdq/bpnfv3jtt89hjj8Ubb7wRTzzxRAwYMCDGjBkT1113XVx++eUxc+bMKC4ujmHDhsXNN98cERF33XXXDvs59thjW/z+1a9+Ne6+++6YN29eXgKPaY98qVO39+Dnf7lbj0vD3N51111RX18f8+fPj6KioojY+lcH+fLDM3/aqdv7yv+e0e7HHH744bFs2bJ44IEHYurUqRGx9a+ghgwZEsOHD2/RtqvMc8TWL8mnTp0a3/jGN+K5556LNWvWtGe3s6r20DGdur193v3Dbj0uW3O9fv36OPfcc2Py5Mlxxx13ZJb/wz/8QwwYMCAmT54cP/7xj+OLX/xiLF68OH71q1/Fiy++GOPGjYuIiFtuuSU+97nPxXe/+90YNGhQlJWVxW233RYREc8///wu57K6ujpvASYAAABAd+WSVnmwYMGCGD16dAwYMCCzbOLEibF27dr43e9+t1t9JkkSTz75ZCxZsiT+5m/+JltDpZ2yNbcPPvhgjB8/PmbMmBEDBgyIQw45JG644YbtLrFDS1/+8pdb/G/+u+66K6ZPn5717WTzNfzNb34zqqur45xzzsn2MLu1bMz1Y489FnV1dXHppZdut+6kk06KkSNHxn333RcRW+e8qqoqE3ZERBx33HHRo0ePWLhwYbvHP2bMmKipqYnjjz8+nn/++XY/HgAAAIDtCTx24qGHHory8vLMz6mnnrrTdeXl5ZnL2Gxz+eWXb9fmueeei4iI2traFl+URkTm9/Zex72hoSHKy8ujuLg4TjzxxLjlllvi+OOP351d3mOkYW5///vfx09/+tPYsmVLPPzww3H11VfHTTfdFP/6r/+6u7u9RzjjjDNi3rx5sWLFilixYkU8//zzccYZ2/+1SFeZ53nz5sWdd94Zc+bMae+u7vGyMddvvvlmREQceOCBO9zGqFGjMm1qa2u3u+xUYWFh9O3bt11zXlNTE7fffnvcf//9cf/998fgwYPj2GOPjVdeeaXNfQAAAACwY3m5pFUafOYzn8lcmiQioqysbKfrIrbeT+PjLrvsssyNc7fZZ599sj7O3r17x6JFi2LdunXx5JNPxj/90z/Fvvvuu93lrvizNMxtc3NzVFdXxx133BE9e/aMI444It599934zne+E9dee21Wt9Wd7L333nHiiSfGf/3Xf0WSJHHiiSdG//79t2vXFea5sbExpk2bFnPmzNnhGNm1bM51kiQ5HevHHXDAAXHAAQdkfj/qqKNi2bJlMXv27Ljnnns6bRwAAAAA3VFeAo9H/vkz+dhsu5SVlcWIESPavW6b/v3777TNwIED44UXXmixbNWqVZl17dGjR4/MdsaMGROLFy+Ob33rW3kJPO454d5O3+buSMPc1tTURFFRUfTs2TOz7MADD4za2trYtGlTFBcXt7mvbDjzh3/fqdvriC9/+ctx4YUXRkTEv//7v++wTVeY52XLlsXy5cvjpJNOyixrbm6OiK1/ObBkyZLYb7/92tRXtgx8bVGnbq+jOjrXI0eOjIiIxYsXx1FHHbXd+sWLF8dBBx0UEVvndfXq1S3Wf/TRR1FfX9/uuv2X/uqv/irmzZvXoT4AAAAAyFPg0aesc7+s7WrGjx8f119/faxevTpziZTHH388KioqMl+u7a7m5uZoamrKxjDbrbKkMi/b7UqyNbdHH3103HvvvdHc3Bw9emy98tybb74ZNTU1nR52RESUVvbq9G3urkmTJsWmTZuioKAgJk6cmJNtZGOeR40aFb/97W9bLLvqqquisbExbr755hg8eHDWx92anv36dfo2O6Kjcz1hwoTo27dv3HTTTdsFHg8++GAsXbo0rrvuuojYOudr1qyJl19+OY444oiIiHjqqaeiubk5jjzyyA7tx6JFi6KmpqZDfQAAAADgkla7pampabtrthcWFra4nEpjY+N2bfbaa6+oqKiICRMmxEEHHRTTpk2Lb3/721FbWxtXXXVVzJgxI0pKSjLtFy3a+r+t161bF3/6059i0aJFUVxcnPlC9Vvf+laMGzcu9ttvv2hqaoqHH3447rnnnu0u30LbdZW5veCCC+LWW2+Nr371q3
HRRRfF0qVL44YbboiLL744V7vebfTs2TMWL16c+feOdIV57tWrVxxyyCEt+q+qqoqI2G45O9bRuS4rK4v/+I//iNNPPz3OO++8uPDCC6OioiKefPLJuOyyy+Lv//7v47TTTouIrX9hNWnSpDj33HPj9ttvj82bN8eFF14Yp59+egwaNCjT9xtvvBGbNm2K+vr6aGxszBwDY8aMiYiI73//+zF8+PA4+OCDY+PGjfGf//mf8dRTT8Vjjz2W9ecHAAAAYI+TsJ2zzjorOfnkk3e6LiK2+znggAMybYYOHbrDNl/5ylcybZYvX56ccMIJSWlpadK/f//k61//erJ58+YW29pRH0OHDs2sv/LKK5MRI0YkvXr1Svr06ZOMHz8+mTt3bnafjG4mLXObJEkyf/785Mgjj0xKSkqSfffdN7n++uuTjz76KHtPRjeyq3lNkiQ5+eSTk7POOivTtivNc3v2g+zPdZIkya9//etk4sSJSUVFRVJcXJwcfPDByXe/+93tXm91dXXJlClTkvLy8qSioiKZPn160tjY2KLNzo6dbWbNmpXst99+Sa9evZK+ffsmxx57bPLUU0918FkBAAAAIEmSpCBJOvFurQAAAAAAADnQI98DAAAAAAAA6CiBBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKlXmKuOm5ub47333ovevXtHQUFBrjYDAAAAAACkQJIk0djYGIMGDYoePbL/9xg5Czzee++9GDx4cK66BwAAAAAAUugPf/hDfOITn8h6vzkLPHr37h0RWwdeUVGRq80AAAAAAAApsHbt2hg8eHAmP8i2nAUe2y5jVVFRIfAAAAAAAAAiInJ2Gww3LQcAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6OQ881td/mOtNQLf0x5Xvxnd/clP8ceW7+R4K3Uz9xvq4d/GPon5jfURsrdMv3febrNbrXPSZZltWrYq1N30vtqxale+hZE132KdsHadpOt7/8vXf0XaQbbk49j7e5/uNTTHn6bfi/camrPXPnq2zjil1GSC3ukud7S77EdG99oXOlfPAY8OajbneBHRLtXW18euip6K2rjbfQ6Gb+WBjfcxdcm988P9PGj78YEO8PPe38eEHG7K2jVz0mWZbVq+Oxu/Nji2rV+d7KFnTHfYpW8dpmo73v3z9d7QdZFsujr2P9/l+Y1Pc+cwygQdZ01nHlLoMkFvdpc52l/2I6F77QudySSsAAAAAACD1BB4AAAAAAEDqFeZ6A03rN8WGBpe1gvba9OHmiIhY/9H6aGhqyPNo6E7WbVq3w+VN67JXr5vWbcpKP91N85qG2FJXl+9hZEXzmu5Tlzp67KfxeF+3ad0u31t2Viegs7R2jLa3r7/UuGFzfLA+fa9dup7GDZs7dXvZfG0A8Gfd7fy3O7xfdLc5ofPkPPB49Ppno7SoNNebgW6noV99xOSIm5bPilie79GwJ/jlNU/mewjdXt3pU/I9BHZgTzz2r55/Zb6HALuU62P0oh++lNP+IVfUbwDawvsFezKXtAIAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEi9nN/DY+KVn45hhwzJ9Wag23l1yaKY996j8fVhl8eYAw/L93DoRpY3vL3D63me+M3PRr9hfbKyjbrlH+yR90VoTb+590XRQQfmexhZsfmNxd3mniQdPfbTeLxfd9T1Maxy+E7X76xOQGdp7Rhtjx0dz7ecOS5GDOydlf7Zs71V29ip94TJ5msDgD/rbue/3eH9orvNCZ0n54FHSVlxlFb2yvVmoNsp3qsoIiLKCsuisqQyz6OhOykvLt/h8pLy7NXrkvLirPTT3fSoqoye/frlexhZsaWq+9Sljh77aTzey4vLd/nesrM6AZ2ltWO0vX39pd6lRdGnLH2vXbqe3qVFnbq9bL42APiz7nb+2x3eL7rbnNB5XNIKAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6vWcOXPmzFx03NTUFDfeeGP8y7/8S/SucpMZaK/mLUk0Ll8fnxr1qajoXZHv4dDN9OpZGqP3PjRKC0sjIqKotDAGHTIgirN4481c9JlmBWVlUTJ+fPQo7z7vid1hn7J1nKbpeP/L139H20G25eLY+3ifpcU944jhfWOvksKs9c+erbOOKXUZILe6S53tLvsR0b32hT/blhtcccUVUVJSkvX+C5IkSbLea0SsXbs2Kisro6GhISoqfFkLAAAAAAB7slznBi5pBQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8uoH6jfVx7+IfRf3G+i7RT67taJwdHfv7jU0x5+m34v3Gpqy0y5Zcba+z94PcM6fp1hXmryuMAei61AjypS3n+bvzWWDLqlWx9qbvxZZVq3ZrXB19PLBVe95fvBd1nnw91+Y4+zyndEV1OT4eBR7dwAcb62Puknvjgw4GFdnqJ9d2NM6Ojv39xqa485llbQo82tIuW3K1vc7eD3LPnKZbV5i/rjAGoOtSI8iXtpzn785ngS2rV0fj92bHltWrd2tcHX08sFV73l+8F3WefD3X5jj7PKd0RXXrBB4AAAAAAAC7JPAAAAAAAABST+ABAAAAAACkXmG+B0D2rNu0LhqaGjr0+DT5+P5ma+yNGzbHB+s37XJ9PrQ2rt3pj+4p28cKnaMrvSYdQ8COdKU6xZ5pV591OvJZoHlNQ2ypq9utxwHZ05ZzUO9Fna+zPxuY49zxOY+upHHDRzntX+DRjVw9/8p8D6FT5WJ/L/rhS1nvMxu66rjoehwrdJRjCICuKFefdepOn5KTfoH2cQ7aNZmX7sNc0pV81LQ+p/27pBUAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDquYdHN3LdUdfHsMrhu/345Q1vp+o+IB/f32yN/ZYzx8WIgb13uv6t2sa8XPewtXG1V772g9zL9rFC5+hKr0nHELAjXalOsWfa1WedjnwW6Df3vig66MB2P27zG4vd/wOyqC3no
N6LOl9nfzYwx7njcx5dyaKl78Xfzspd/wKPbqS8uDwqSyo79Pg0+fj+ZmvsvUuLok9Z8S7X50Nr49qd/uiesn2s0Dm60mvSMQTsSFeqU+yZdvVZpyOfBXpUVUbPfv3a/bgtVbv/uQvYXlvOQb0Xdb7O/mxgjnPH5zy6kt6luY0kXNIKAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9XrOnDlzZi46bmpqihtvvDGuuOKKKCkpycUm+JhePUtj9N6HRmlhaZfoJ9d2NM6Ojr20uGccMbxv7FWy6+vItbVdtuRqe529H+SeOU23rjB/XWEMQNelRpAvbTnP353PAgVlZVEyfnz0KN+9e4B09PHAVu15f/Fe1Hny9Vyb4+zznNLVNDU1xc3f+07OcoOCJEmSrPcaEWvXro3KyspoaGiIioqKXGwCAAAAAABIiVznBi5pBQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN47CHeb2yKOU+/Fe83NnWoDQDs6eo31se9i38U9Rvrd7kMAAAA6FwCjz3E+41Nceczy1oNPFprAwB7ug821sfcJffGBx8LN3a0DAAAAOhcAg8AAAAAACD1BB4AAAAAAEDqFeZ7AHSuxg2b44P1m3a6DgBom3Wb1kVDU0Pm3wAAAEB+CTz2MBf98KV8DwEAuoWr51+Z7yEAAAAAH+OSVgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKnnHh57mFvOHBcjBvbe4bq3ahvd4wMA2ui6o66PYZXDIyJiecPb7ukBAAAAeSbw2MP0Li2KPmXFO10HALRNeXF5VJZUZv4NAAAA5JdLWgEAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gcceon/vkjjn2P2if++SDrUBgD1dn1594/QDvhR9evXd5TIAAACgcxUkSZLkouO1a9dGZWVlNDQ0REVFRS42AQAAAAAApESucwN/4QEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1CnPVcZIkERGxdu3aXG0CAAAAAABIiW15wbb8INtyFnjU1dVFRMTgwYNztQkAAAAAACBl6urqorKyMuv95izw6Nu3b0REvPPOOzkZOJAOa9eujcGDB8cf/vCHqKioyPdwgDxQBwB1AIhQCwB1AIhoaGiIIUOGZPKDbMtZ4NGjx9bbg1RWVipgQFRUVKgFsIdTBwB1AIhQCwB1APhzfpD1fnPSKwAAAAAAQCcSeAAAAAAAAKnXc+bMmTNz1nnPnnHsscdGYWHOrpwFpIBaAKgDgDoARKgFgDoA5LYOFCRJkmS9VwAAAAAAgE7kklYAAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABSr92Bx7vvvhtnnHFG9OvXL0pLS2P06NHx0ksvZdYnSRLXXHNN1NTURGlpaRx33HGxdOnSFn3U19fH1KlTo6KiIqqqquKcc86JdevWdXxvgE7RWh144IEHYsKECdGvX78oKCiIRYsWbdfHxo0bY8aMGdGvX78oLy+Pv/u7v4tVq1Z15m4AHbSrWrB58+a4/PLLY/To0VFWVhaDBg2KM888M957770WfTgngHRr7Zxg5syZMWrUqCgrK4s+ffrEcccdFwsXLmzRhzoA6dZaHfi4888/PwoKCuL73/9+i+XqAKRfa7Xg7LPPjoKCghY/kyZNatGHWgDp1pZzgsWLF8fkyZOjsrIyysrK4pOf/GS88847mfXZ+L6wXYHHBx98EEcffXQUFRXFI488Em+88UbcdNNN0adPn0ybb3/72/Fv//Zvcfvtt8fChQujrKwsJk6cGBs3bsy0mTp1avzud7+Lxx9/PB566KH49a9/Heedd167Bg7kR1vqwPr16+OYY46JWbNm7bSfr33ta/GLX/wifvKTn8Szzz4b7733XnzhC1/ojF0AsqC1WvDhhx/GK6+8EldffXW88sor8cADD8SSJUti8uTJLfpxTgDp1ZZzgpEjR8att94av/3tb2PevHkxbNiwmDBhQvzpT3/KtFEHIL3aUge2+dnPfhb/93//F4MGDdpunToA6dbWWjBp0qRYuXJl5ue+++5rsV4tgPRqSx1YtmxZHHPMMTFq1Kh45pln4rXXXourr746evXqlWmTle8Lk3a4/PLLk2OOOWan65ubm5OBAwcm3/nOdzLL1qxZk5SUlCT33XdfkiRJ8sYbbyQRkbz44ouZNo888khSUFCQvPvuu+0ZDpAHrdWBj3v77beTiEheffXVFsvXrFmTFBUVJT/5yU8yyxYvXpxERLJgwYKsjhfIjfbUgm1eeOGFJCKSFStWJEninADSbnfqQENDQxIRyRNPPJEkiToAadfWOvDHP/4x2WeffZLXX389GTp0aDJ79uzMOnUA0q8tteCss85KTj755J2uVwsg3dpSB774xS8mZ5xxxk7XZ+v7wnb9hceDDz4Y48aNi1NPPTWqq6tj7NixMWfOnMz6t99+O2pra+O4447LLKusrIwjjzwyFixYEBERCxYsiKqqqhg3blymzXHHHRc9evTY7s/bga6ntTrQFi+//HJs3ry5Ra0YNWpUDBkyJFMrgK5td2pBQ0NDFBQURFVVVUQ4J4C0a28d2LRpU9xxxx1RWVkZhx12WESoA5B2bakDzc3NMW3atLjsssvi4IMP3q4PdQDSr63nBM8880xUV1fHAQccEBdccEHU1dVl1qkFkG6t1YHm5ub45S9/GSNHjoyJEydGdXV1HHnkkfHzn/880yZb3xe2K/D4/e9/H7fddlvsv//+8eijj8YFF1wQF198cdx9990REVFbWxsREQMGDGjxuAEDBmTW1dbWRnV1dYv1hYWF0bdv30wboOtqrQ60RW1tbRQXF2e+9Nzm47UC6NraWws2btwYl19+eUyZMiUqKioiwjkBpF1b68BDDz0U5eXl0atXr5g9e3Y8/vjj0b9//4hQByDt2lIHZs2aFYWFhXHxxRfvsA91ANKvLbVg0qRJ8cMf/jCefPLJmDVrVjz77LNx
wgknxJYtWyJCLYC0a60OrF69OtatWxc33nhjTJo0KR577LE45ZRT4gtf+EI8++yzEZG97wsL2zPw5ubmGDduXNxwww0RETF27Nh4/fXX4/bbb4+zzjqrPV0BKaUOABHtqwWbN2+O0047LZIkidtuuy0fwwVyoK114DOf+UwsWrQo3n///ZgzZ06cdtppsXDhwu2+1ADSp7U68PLLL8fNN98cr7zyShQUFOR5tECutOWc4PTTT8+0Hz16dBx66KGx3377xTPPPBOf/exn8zJuIHtaqwPNzc0REXHyySfH1772tYiIGDNmTMyfPz9uv/32+PSnP521sbTrLzxqamrioIMOarHswAMPzNxJfeDAgRER2905fdWqVZl1AwcOjNWrV7dY/9FHH0V9fX2mDdB1tVYH2mLgwIGxadOmWLNmTYvlH68VQNfW1lqwLexYsWJFPP7445vo3U4AAAbjSURBVJm/7ohwTgBp19Y6UFZWFiNGjIhPfepTceedd0ZhYWHceeedEaEOQNq1Vgeee+65WL16dQwZMiQKCwujsLAwVqxYEV//+tdj2LBhEaEOQHewO98T7LvvvtG/f/946623IkItgLRrrQ70798/CgsLW80WsvF9YbsCj6OPPjqWLFnSYtmbb74ZQ4cOjYiI4cOHx8CBA+PJJ5/MrF+7dm0sXLgwxo8fHxER48ePjzVr1sTLL7+cafPUU09Fc3NzHHnkke0ZDpAHrdWBtjjiiCOiqKioRa1YsmRJvPPOO5laAXRtbakF28KOpUuXxhNPPBH9+vVr0d45AaTb7p4TNDc3R1NTU0SoA5B2rdWBadOmxWuvvRaLFi3K/AwaNCguu+yyePTRRyNCHYDuYHfOCf74xz9GXV1d1NTURIRaAGnXWh0oLi6OT37yk7tsk7XvC9t8e/MkSV544YWksLAwuf7665OlS5cmP/rRj5K99tor+e///u9MmxtvvDGpqqpK/vd//zd57bXXkpNPPjkZPnx4smHDhkybSZMmJWPHjk0WLlyYzJs3L9l///2TKVOmtGcoQJ60pQ7U1dUlr776avLLX/4yiYhk7ty5yauvvpqsXLky0+b8889PhgwZkjz11FPJSy+9lIwfPz4ZP358PnYJ2A2t1YJNmzYlkydPTj7xiU8kixYtSlauXJn5aWpqyvTjnADSq7U6sG7duuSKK65IFixYkCxfvjx56aWXkunTpyclJSXJ66+/nulHHYD0astng780dOjQZPbs2S2WqQOQbq3VgsbGxuTSSy9NFixYkLz99tvJE088kRx++OHJ/vvvn2zcuDHTj1oA6dWWc4IHHnggKSoqSu64445k6dKlyS233JL07Nkzee655zJtsvF9YbsCjyRJkl/84hfJIYcckpSUlCSjRo1K7rjjjhbrm5ubk6uvvjoZMGBAUlJSknz2s59NlixZ0qJNXV1dMmXKlKS8vDypqKhIpk+fnjQ2NrZ3KECetFYHfvCDHyQRsd3Ptddem2mzYcOG5B//8R+TPn36JHvttVdyyimntAhEgK5vV7Xg7bff3mEdiIjk6aefzrRzTgDptqs6sGHDhuSUU05JBg0alBQXFyc1NTXJ5MmTkxdeeKFFH+oApFtrnw3+0o4CD3UA0m9XteDDDz9MJkyYkOy9995JUVFRMnTo0OTcc89NamtrW/ShFkC6teWc4M4770xGjBiR9OrVKznssMOSn//85y3WZ+P7woIkSZJ2/HUKAAAAAABAl9Oue3gAAAAAAAB0RQIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAA6LCzzz47Pv/5z+d7GAAAwB6sMN8DAAAAuraCgoJdrr/22mvj5ptvjiRJOmlEAAAA2xN4AAAAu7Ry5crMv//nf/4nrrnmmliyZElmWXl5eZSXl+djaAAAABkuaQUAAOzSwIEDMz+VlZVRUFDQYll5efl2l7Q69thj46KLLopLLrkk+vTpEwMGDIg5c+bE+vXrY/r06dG7d+8YMWJEPPLIIy229frrr8cJJ5wQ5eXlMWDAgJg2bVq8//77nb3LAABACgk8AACAnLj77rujf//+8cILL8RFF10UF1xwQZx66qlx1FFHxSuvvBITJkyIadOmxYcffhgREWvWrIm//du/jbFjx8ZLL70Uv/rVr2LVqlVx2mmn5XlPAACANBB4AAAAOXHYYYfFVVddFfvvv39cccUV0atXr+jfv3+ce+65sf/++8c111wTdXV18dprr0VExK233hpjx46NG264IUaNGhVjx46Nu+66K55++ul4880387w3AABAV+ceHgAAQE4ceuihmX/37Nkz+vXrF6NHj84sGzBgQERErF69OiIifvOb38TTTz+9w/uBLFu2LEaOHJnjEQMAAGkm8AAAAHKiqKioxe8FBQUtlhUUFERERHNzc0RErFu3Lk466aSYNWvWdn3V1NTkcKQAAEB3IPAAAAC6hMMPPzzuv//+GDZsWBQW+qgCAAC0j3t4AAAAXcKMGTOivr4+pkyZEi+++GIsW7YsHn300Zg+fXps2bIl38MDAAC6OIEHAADQJQwaNCief/752LJlS0yYMCFGjx4dl1xySVRVVUWPHj66AAAAu1aQJEmS70EAAAAAAAB0hP8mBQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1/h//ffiQt3ubxgAAAABJRU5ErkJggg==", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAl9UlEQVR4nO3de3RV5Z0//k8gEEJIjpAYAnJzitzECpWZqnTVajtKRwutq/WCIkjrZU3VTlupdrqsOi67rFOko1ZbrTq9UGnroF+trbd6RSq2CK1WRlFBRblowBAEApL9+4OfZ4xccjsnJzu8XmtlLbL3s5/97PPs8znn8M7ZuyhJkiQAAAAAAABSrFuhBwAAAAAAANBeAg8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2Bx27MmDEjioqKdvl56aWX9rhu0qRJ2e2HDRu22zZXXXVVts1rr70Wn/vc56KsrCyqqqriggsuiG3btmXXb926NWbMmBGHHHJIFBcXx+c///ldxrlgwYKYOHFiVFZWRmlpaYwaNSrmzJmT18cm7dIytxERDQ0N8Z3vfCeGDh0aJSUl8ZGPfCRuvfXWvD02aff+/J177rm7rPvXf/3XKCoqihkzZjRp21nm+n1PPvlkFBcXx7hx49r1WHR1uZ7riIiFCxfGv/zLv0Tfvn2jV69eccghh8Ts2bNjx44dTdpt2LAhpk2bFplMJjKZTEybNi3eeeedJm2+9rWvxWGHHRYlJSW7ncuVK1fudkz33Xdfux4XAAAAgH1dcSF2uuHdbc03yqG+ZT1bvc2kSZPitttua7Js//333+O6kpKSJr//x3/8R5x11llNlpWXl0dExI4dO+L444+P/fffPxYsWBC1tbUxffr0SJIkrrvuumyb0tLSuOCCC+J//ud/djvGsrKyOO+88+KjH/1olJWVxYIFC+Kcc86JsrKyOPvss1t9zLlQ11DXYfvKlGTatF0a5jYi4qSTToq1a9fGLbfcEsOHD49169bFe++916Zjbq8tdVs7dH+lmV5t2m7w4MExb968mDNnTpSWlkbEztDh9ttvjyFDhjRp25nmOiKirq4uzjjjjPj0pz8da9eubf3B58iO2toO3V/3yso2bZfLub7zzjvjpJNOijPPPDMeeeSR2G+//eKhhx6Kb33rW/HUU0/Fb37zmygqKoqIiKlTp8aqVauy4cTZZ58d06ZNi3vuuSfbX5IkMXPmzFi0aFH87W9/2+MxPPTQQ3HwwQdnf+/Xr1+bHgsAAAAAdipI4PHZqx/p0P09dflxrd6mpKQkampqWr3ufeXl5Xts88ADD8Tzzz8fr7/+egwcODAiImbPnh0zZsyIK6+8MioqKqKsrCxuvPHGiNj5V98f/gviiIjx48fH+PHjs78PGzYs5s+fH0888UTBAo9pf5jaYfu6+/P3tmm7NMztfffdF4899li88sor2f8EHTZsWAuPMPd+fsYdHbq/c/7f6W3a7mMf+1i88sorMX/+/DjttNMiImL+/PkxePDg+Id/+IcmbTvLXL/vnHPOialTp0b37t3jrrvuauER596aj47r0P0d8MbrbdouV3P97rvvxllnnRWTJ0+Om266Kbv8K1/5SvTv3z8mT54cv/nNb+Lkk0+OZcuWxX333RdPPfVUfPzjH4+IiJtvvjmOOOKIeOGFF2LkyJEREXHttddGRMRbb72118CjsrKy2XMQAAAAgJZzSasC+NOf/hRjx47N/idpRMRxxx0XDQ0NsXjx4jb3u2TJkli4cGEcddRRuRgmbZCrub377rtjwoQJcfXVV8cBBxwQI0aMiAsvvDC2bNmSj2F3KWeeeWaTv+a/9dZbY+bMmTnfTy6fx7fddlu8/PLLcemll+Z6mF1aLub6gQceiNra2rjwwgt3Wfe5z30uRowYEbfffntE7JzzTCaTDTsiIg4//PDIZDKxcOHCVo9/8uTJUV1dHRMnTow77ujYUBEAAACgKxJ47MHvfve76NOnT/bnS1/60h7X9enTJ6644oom21900UW7tHn00UcjImLNmjXRv3//Ju379u0bPXv2jDVr1rR6rIMGDYqSkpKYMGFCfPWrX42vfOUrrT/gfUga5vaVV16JBQsWxHPPPRd33nln/PCHP4w77rgjvvrVr7b9wPcR06ZNiwULFsTKlSvj1VdfjSeffDJOP33Xb4x0lrlevnx5XHzxxTF37twoLi7Il+5SKxdz/eKLL0ZExOjRo3e7j1GjRmXbrFmzJqqrq3dpU11d3ao579OnT1xzzTVxxx13xO9///v49Kc/HSeffHL88pe/bHEfAAAAAOzK/67twdFHH529FE3Ezvtl7GldxK7XXp81a1b2prnvO+CAA7L/fv968B+UJMlulzfniSeeiE2bNsVTTz0VF198cQwfPjxOPfXUVvezr0jD3DY2NkZRUVHMnTs3Mpmd9yq55ppr4otf/GL86Ec/yt6zgF1VVVXF8ccfHz/72c8iSZI4/vjjo6qqapd2nWGud+zYEVOnTo3LL788RowY0aJt+D+5nOskSXa7jw/PZy6e31VVVfH1r389+/uECRNiw4YNcfXVV+82sAEAAACgZQoSePzhW0cXYretUlZWFsOHD2/1uvdVVVXtsU1NTU0sWrSoybINGzbE9u3bd/mL8ZY48MADIyLikEMOibVr18Zll11WsMDjF5/9VUH22xppmNsBAwbEAQcckA07Inb+BXqSJLFq1ao46KCDWtxXLpzx8y926P7aa+bMmXHeeedFRMSPfvSj3bbpDHNdX18ff/nLX2LJkiXZ8TY2NkaSJFFcXBwPPPBAHHPMMS3qK1dq/ra0Q/fXXu2d6/eDpmXLlsWRRx65y/r//d//jTFjxkTEzjnf3Q3l33rrrTbV7g86/PDD46c//Wm7+gAAAADY1xUk8Ohb1rMQu+00jjjiiLjyyitj9erVMWDAgIjYeR35kpKSOOyww9rVd5Ik0dDQkIthtkmmJNN8oy4sV3M7ceLE+O1vfxubNm2KPn36RMTOS+9069YtBg0alJex701ppleH77M9Jk2aFNu2bYuInffVyIdczHVFRUU8++yzTZbdcMMN8fDDD8cdd9yRDTM7UvfKyg7fZ3u0d66PPfbY6NevX8yePXuXwOPuu++O5cuXZy9/dcQRR0RdXV08/fTT8U//9E8REbFo0aKoq6vbbVjSGkuWLMmeRwAAAAC0jUtatUFDQ8Mu12svLi5ucimV+vr6Xdr07t07Kioq4thjj40xY8bEtGnT4j//8z9j/fr1ceGFF8ZZZ50VFRUV2fbPP/98bNu2LdavXx/19fWxdOnSiIgYN25cROz8a+YhQ4bEqFGjIiJiwYIF8YMf/CDOP//8PBz1vqGzzO3UqVPjiiuuiDPPPDMuv/zyePvtt2PWrFkxc+
ZMl7Nqge7du8eyZcuy/96dzjDX3bp1i7Fjxzbpv7q6Onr16rXLcnavvXNdVlYWP/nJT+KUU06Js88+O84777yoqKiIP/7xjzFr1qz44he/GCeddFJE7PyW1aRJk+Kss86Kn/zkJxERcfbZZ8cJJ5wQI0eOzPb90ksvxaZNm2LNmjWxZcuW7JyPGTMmevbsGT/72c+iR48eMX78+OjWrVvcc889ce2118b3v//9XD88AAAAAPuWhF1Mnz49mTJlyh7XRcQuPyNHjsy2GTp06G7bnHPOOdk2r776anL88ccnpaWlSb9+/ZLzzjsv2bp1a5N97amf91177bXJwQcfnPTu3TupqKhIxo8fn9xwww3Jjh07cvuAdCFpmdskSZJly5Yln/nMZ5LS0tJk0KBByTe+8Y1k8+bNuXswupi9zW2SJMmUKVOS6dOnZ9t2prn+oEsvvTQ59NBD2/QY7CtyPddJkiSPP/54MmnSpCSTySQ9e/ZMxowZk/zgBz9I3nvvvSbtamtrk9NOOy0pLy9PysvLk9NOOy3ZsGFDkzZHHXXUbve5YsWKJEmS5L//+7+T0aNHJ717907Ky8uTww47LPnFL37R3ocFAAAAYJ9XlCR7uFMrAAAAAABASnQr9AAAAAAAAADaS+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6hXnq+PGxsZ48803o7y8PIqKivK1GwAAAAAAIAWSJIn6+voYOHBgdOuW++9j5C3wePPNN2Pw4MH56h4AAAAAAEih119/PQYNGpTzfvMWeJSXl0fEzoFXVFTkazcAAAAAAEAKbNy4MQYPHpzND3Itb4HH+5exqqioEHgAAAAAAAAREXm7DYablgMAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEi9vAce767fnO9dQJe0avUb8YPfzo5Vq98o9FDoYtZvXR+/WjY31m9dHxE76/Rfbv9rTut1PvpMsx1r18bG2dfEjrVrCz2UnOkKx5Sr8zRN5/uHn//tbQe5lo9z74N9vl3fEDc/8lK8Xd+Qs/7Zt3XkOaU2A+RPV6mxXeU4IrrWsdCx8h54bHlna753AV3Smto18XiPh2NN7ZpCD4UuZsPW9THvhV/Fhv//TcPmDVti8bxnY/OGLTnbRz76TLMd69ZF/TVzYse6dYUeSs50hWPK1XmapvP9w8//9raDXMvHuffBPt+ub4hbHn1Z4EHOdOQ5pTYD5E9XqbFd5Tgiutax0LFc0goAAAAAAEg9gQcAAAAAAJB6xfneQcO722JLnctaQWtt27w9IiLefe/dqGuoK/Bo6Eo2bdu02+UNm3JXrxs2bctJP11N4zt1saO2ttDDyInGd7pOXWrvuZ/G833Ttk17fW3ZU52AjtLcOdravj6sfsv22PBu+p67dD71W7Z3+D5z+fwAYKeu9v63K7xWdLU5oePkPfC4/8rHorRHab53A11OXeX6iMkRs1d+P2JloUfDvuDe7/6x0EPo8mpPObXQQ2A39sVz/5KF3yn0EGCv8n2Onv/zv+S1f8gnNRyA5nitYF/mklYAAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpl/d7eBz3naNi2Ngh+d4NdDlLXlgaC968P7457KIYN/rQQg+HLmRl3YrdXs/z+P/4dFQO65uTfdSu3LBP3hehOZXzbo8eY0YXehg5sf35ZV3mniTtPffTeL5fceSVMSxz4B7X76lOQEdp7hxtjd2dz9edMSGG15TnpH/2bS+tqe/we8Lk8vkBwE5d7f1vV3it6GpzQsfJe+BRUtYzSjO98r0b6HJ69u4RERFlxWWRKckUeDR0JX169tnt8pI+uavXJX165qSfrqbbfpnoXllZ6GHkxI79uk5dau+5n8bzvU/PPnt9bdlTnYCO0tw52tq+Pqy8tEf0LUvfc5fOp7y0R4fvM5fPDwB26mrvf7vCa0VXmxM6jktaAQAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASL28Bx6l+7lhObRFTWVNfHL7MVFTWVPoodDF9O3VL04ZOTX69uoXERG9+5bGYaccEr37luZsH/noM826V1dH+Te+Ht2rqws9lJzpCseUq/M0Tef7h5//7W0HuZaPc++DfVaVl8SXP/WRqCovyVn/7Ns68pxSmwHyp6vU2K5yHBFd61joWEVJkiT56Hjjxo2RyWSirq4uKioq8rELAAAAAAAgJfKdG7ikFQAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfw6ALWb10fv1o2N9ZvXd8p+sm33Y2zvWN/u74hbn7kpXi7viEn7XIlX/vr6OMg/8xpunWW+ess4wA6H/WBQmnJ+/y2fBbYsXZtbJx9TexYu7ZN42rv9sBOrXl98VrUsQrxeJvj3POY0hnV5vl8FHh0ARu2ro95L/wqNrQzqMhVP/m2u3G2d+xv1zfELY++3KLAoyXtciVf++vo4yD/zGm6dZb56yzjADof9YFCacn7/LZ8Ftixbl3UXzMndqxb16ZxtXd7YKfWvL54LepYhXi8zXHueUzpjGo3CTwAAAAAAAD2SuABAAAAAACknsADAAAAAABIveJCD4Dc2bRtU9Q11LVr+zT54PHmauz1W7bHhne37XV9ITQ3rrb0R9eU63OFjtHZnpPOI+DDOludYt+zt8867fks0PhOXeyorW3TdkDutOT9p9eiwujIzwbmOH98xqMzqd/yXl77F3h0IZcs/E6hh9Ch8nG85//8LznvMxc667jofJwr5ILzCIDOJl+fdWpPOTUv/QKt4/1n52VuugbzSGfyXsO7ee3fJa0AAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABSzz08upArjrwyhmUObPP2K+tWpOo+IB883lyN/bozJsTwmvI9rn9pTX1BrnvY3Lhaq1DHQf7l+lyhY3S256TzCPiwzlan2Pfs7bNOez4LVM67PXqMGd3q7bY/v8z9PyCHWvL+02tRYXTkZwNznD8+49GZLF3+Zhzz/fz1L
/DoQvr07BOZkky7tk+TDx5vrsZeXtoj+pb13Ov6QmhuXG3pj64p1+cKHaOzPSedR8CHdbY6xb5nb5912vNZoNt+meheWdnq7Xbs1/bPXcCuWvL+02tRYXTkZwNznD8+49GZlJfmN5JwSSsAAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3h0AX179YtTRk6Nvr36dYp+8m1342zv2KvKS+LLn/pIVJWX5KRdruRrfx19HOSfOU23zjJ/nWUcQOejPlAoLXmf35bPAt2rq6P8G1+P7tXVbRpXe7cHdmrN64vXoo5ViMfbHOeex5TOqLJPfs/HoiRJknx0vHHjxshkMlFXVxcVFRX52AUAAAAAAJAS+c4NfMMDAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/DYh7xd3xA3P/JSvF3f0Kb1AEDE+q3r41fL5sb6retbtBwAAADoGAKPfcjb9Q1xy6Mv7zXw2Nt6ACBiw9b1Me+FX8WGDwUbe1oOAAAAdAyBBwAAAAAAkHoCDwAAAAAAIPWKCz0AOl79lu2x4d1tu10OALTMpm2boq6hrsnvAAAAQOEIPPZB5//8L4UeAgCk3iULv1PoIQAAAAAf4JJWAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqeceHvug686YEMNryndZ/tKaevf3AIAWuuLIK2NY5sDs7yvrVrivBwAAABSQwGMfVF7aI/qW9dztcgCgZfr07BOZkkyT3wEAAIDCcUkrAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfw2IdUlZfElz/1kagqL2nTegAgom+vfnHKyKnRt1e/Fi0HAAAAOkZRkiRJPjreuHFjZDKZqKuri4qKinzsAgAAAAAASIl85wa+4QEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1ivPVcZIkERGxcePGfO0CAAAAAABIiffzgvfzg1zLW+BRW1sbERGDBw/O1y4AAAAAAICUqa2tjUwmk/N+8xZ49OvXLyIiXnvttbwMHEiHjRs3xuDBg+P111+PioqKQg8HKAB1AFAHgAi1AFAHgIi6uroYMmRINj/ItbwFHt267bw9SCaTUcCAqKioUAtgH6cOAOoAEKEWAOoA8H/5Qc77zUuvAAAAAAAAHUjgAQAAAAAApF7eAo+SkpK49NJLo6SkJF+7AFJALQDUAUAdACLUAkAdAPJfB4qSJEny0jMAAAAAAEAHcUkrAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqdfqwOONN96I008/PSorK6N3794xbty4WLx4cXZ9kiRx2WWXxcCBA6O0tDQ+9alPxd///vcmfTQ0NMT5558fVVVVUVZWFpMnT45Vq1a1/2iADtFcHZg/f34cd9xxUVVVFUVFRbF06dJd+lAHIP32Vgu2b98eF110URxyyCFRVlYWAwcOjDPOOCPefPPNJn2oBZBuzb0nuOyyy2LUqFFRVlYWffv2jc985jOxaNGiJn2oA5BuzdWBDzrnnHOiqKgofvjDHzZZrg5A+jVXC2bMmBFFRUVNfg4//PAmfagFkG4teU+wbNmymDx5cmQymSgvL4/DDz88Xnvttez6XNSBVgUeGzZsiIkTJ0aPHj3iD3/4Qzz//PMxe/bs2G+//bJtrr766rjmmmvi+uuvjz//+c9RU1MT//zP/xz19fXZNv/2b/8Wd955Z8ybNy8WLFgQmzZtihNOOCF27NjRqsEDHa8ldeDdd9+NiRMnxlVXXbXHftQBSLfmasHmzZvjmWeeiUsuuSSeeeaZmD9/frz44osxefLkJv2oBZBeLXlPMGLEiLj++uvj2WefjQULFsSwYcPi2GOPjbfeeivbRh2A9GpJHXjfXXfdFYsWLYqBAwfusk4dgHRraS2YNGlSrF69Ovvz+9//vsl6tQDSqyV14OWXX45PfOITMWrUqHj00Ufjr3/9a1xyySXRq1evbJuc1IGkFS666KLkE5/4xB7XNzY2JjU1NclVV12VXbZ169Ykk8kkP/7xj5MkSZJ33nkn6dGjRzJv3rxsmzfeeCPp1q1bct9997VmOEABNFcHPmjFihVJRCRLlixpslwdgPRrTS1439NPP51ERPLqq68mSaIWQNq1pQ7U1dUlEZE89NBDSZKoA5B2La0Dq1atSg444IDkueeeS4YOHZrMmTMnu04dgPRrSS2YPn16MmXKlD2uVwsg3VpSB04++eTk9NNP3+P6XNWBVn3D4+67744JEybEl770paiuro7x48fHzTffnF2/YsWKWLNmTRx77LHZZSUlJXHUUUfFwoULIyJi8eLFsX379iZtBg4cGGPHjs22ATqv5upAS6gDkH5tqQV1dXVRVFSU/QsPtQDSrbV1YNu2bXHTTTdFJpOJQw89NCLUAUi7ltSBxsbGmDZtWsyaNSsOPvjgXfpQByD9Wvqe4NFHH43q6uoYMWJEnHXWWbFu3brsOrUA0q25OtDY2Bj33ntvjBgxIo477riorq6Oj3/843HXXXdl2+SqDrQq8HjllVfixhtvjIMOOijuv//+OPfcc+OCCy6In//85xERsWbNmoiI6N+/f5Pt+vfvn123Zs2a6NmzZ/Tt23ePbYDOq7k60BLqAKRfa2vB1q1b4+KLL46pU6dGRUVFRKgFkHYtrQO/+93vok+fPtGrV6+YM2dOPPjgg1FVVRUR6gCkXUvqwPe///0oLi6OCy64YLd9qAOQfi2pBZ/97Gdj7ty58fDDD8fs2bPjz3/+cxxzzDHR0NAQEWoBpF1zdWDdunWxadOmuOqqq2LSpEnxwAMPxBe+8IU48cQT47HHHouI3NWB4tYMvLGxMSZMmBDf+973IiJi/Pjx8fe//z1uvPHGOOOMM7LtioqKmmyXJMkuyz6sJW2AwmtpHWgLdQDSozW1YPv27XHKKadEY2Nj3HDDDc32rRZAOrS0Dhx99NGxdOnSePvtt+Pmm2+Ok046KRYtWhTV1dV77FsdgHRorg4sXrw4/uu/
/iueeeaZVj+n1QFIj5a8Jzj55JOz7ceOHRsTJkyIoUOHxr333hsnnnjiHvtWCyAdmqsDjY2NERExZcqU+PrXvx4REePGjYuFCxfGj3/84zjqqKP22Hdr60CrvuExYMCAGDNmTJNlo0ePzt5JvaamJiJil8Rl3bp12W991NTUxLZt22LDhg17bAN0Xs3VgZZQByD9WloLtm/fHieddFKsWLEiHnzwwey3OyLUAki7ltaBsrKyGD58eBx++OFxyy23RHFxcdxyyy0RoQ5A2jVXB5544olYt25dDBkyJIqLi6O4uDheffXV+OY3vxnDhg2LCHUAuoK2/D/BgAEDYujQobF8+fKIUAsg7ZqrA1VVVVFcXNxstpCLOtCqwGPixInxwgsvNFn24osvxtChQyMi4sADD4yampp48MEHs+u3bdsWjz32WBx55JEREXHYYYdFjx49mrRZvXp1PPfcc9k2QOfVXB1oCXUA0q8lteD9sGP58uXx0EMPRWVlZZP2agGkW1vfEyRJkr18hToA6dZcHZg2bVr87W9/i6VLl2Z/Bg4cGLNmzYr7778/ItQB6Ara8p6gtrY2Xn/99RgwYEBEqAWQds3VgZ49e8Y//uM/7rVNzupAi29vniTJ008/nRQXFydXXnllsnz58mTu3LlJ7969k1/+8pfZNldddVWSyWSS+fPnJ88++2xy6qmnJgMGDEg2btyYbXPuuecmgwYNSh566KHkmWeeSY455pjk0EMPTd57773WDAcogJbUgdra2mTJkiXJvffem0REMm/evGTJkiXJ6tWrs23UAUi35mrB9u3bk8mTJyeDBg1Kli5dmqxevTr709DQkO1HLYD0aq4ObNq0Kfn2t7+d/OlPf0pWrlyZLF68OPnyl7+clJSUJM8991y2H3UA0qslnw0+bOjQocmcOXOaLFMHIN2aqwX19fXJN7/5zWThwoXJihUrkkceeSQ54ogjkgMOOMD/F0IX0ZL3BPPnz0969OiR3HTTTcny5cuT6667LunevXvyxBNPZNvkog60KvBIkiS55557krFjxyYlJSXJqFGjkptuuqnJ+sbGxuTSSy9NampqkpKSkuSTn/xk8uyzzzZps2XLluS8885L+vXrl5SWliYnnHBC8tprr7V2KECBNFcHbrvttiQidvm59NJLs23UAUi/vdWCFStW7LYORETyyCOPZNupBZBue6sDW7ZsSb7whS8kAwcOTHr27JkMGDAgmTx5cvL000836UMdgHRr7rPBh+0u8FAHIP32Vgs2b96cHHvsscn++++f9OjRIxkyZEgyffr0XZ7nagGkW0veE9xyyy3J8OHDk169eiWHHnpoctdddzVZn4s6UJQkSdKKb6cAAAAAAAB0Oq26hwcAAAAAAEBnJPAAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AACAdrvsssti3LhxhR4GAACwDytKkiQp9CAAAIDOq6ioaK/rp0+fHtdff300NDREZWVlB40KAACgKYEHAACwV2vWrMn++9e//nV897vfjRdeeCG7rLS0NDKZTCGGBgAAkOWSVgAAwF7V1NRkfzKZTBQVFe2y7MOXtJoxY0Z8/vOfj+9973vRv3//2G+//eLyyy+P9957L2bNmhX9+vWLQYMGxa233tpkX2+88UacfPLJ0bdv36isrIwpU6bEypUrO/aAAQCAVBJ4AAAAefHwww/Hm2++GY8//nhcc801cdlll8UJJ5wQffv2jUWLFsW5554b5557brz++usREbF58+Y4+uijo0+fPvH444/HggULok+fPjFp0qTYtm1bgY8GAADo7AQeAABAXvTr1y+uvfbaGDlyZMycOTNGjhwZmzdvjn//93+Pgw46KL797W9Hz54948knn4yIiHnz5kW3bt3ipz/9aRxyyCExevTouO222+K1116LRx99tLAHAwAAdHrFhR4AAADQNR188MHRrdv//Y1V//79Y+zYsdnfu3fvHpWVlbFu3bqIiFi8eHG89NJLUV5e3qSfrVu3xssvv9wxgwYAAFJL4AEAAORFjx49mvxeVFS022WNjY0REdHY2BiHHXZYzJ07d5e+9t9///wNFAAA6BIEHgAAQKfwsY99LH79619HdXV1VFRUFHo4AABAyriHBwAA0CmcdtppUVVVFVOmTIknnngiVqxYEY899lh87Wtfi1WrVhV6eAAAQCcn8AAAADqF3r17x+OPPx5DhgyJE088MUaPHh0zZ86MLVu2+MYHAADQrKIkSZJCDwIAAAAAAKA9fMMDAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOr9fw+gShyFf/1LAAAAAElFTkSuQmCC\n", "text/plain": [ - "" + "" ] }, "execution_count": 4, @@ -178,7 +172,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "L3FQXT5FYnp-" @@ -189,7 +182,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "metadata": { "colab": { "base_uri": "https://localhost:8080/", @@ -213,7 +206,7 @@ "" ] }, - "execution_count": 5, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -226,7 +219,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "hkzox7QIYnp_" @@ -240,7 +232,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "3hmFmLzFYnp_" @@ -251,55 +242,22 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": { "id": "xC05jFO_Ynp_", "outputId": "c5502632-56ae-4adb-8bdc-112deedc8893" }, "outputs": [ { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " Upload widget is only available when the cell has been executed in the\n", - " current browser session. 
Please rerun this cell to enable.\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Saving sample.wav to sample.wav\n" + "ename": "ModuleNotFoundError", + "evalue": "No module named 'google.colab'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m/tmp/ipykernel_26526/1787874441.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mimport\u001b[0m \u001b[0mgoogle\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcolab\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0mown_file\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgoogle\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcolab\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfiles\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mupload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpopitem\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mOWN_FILE\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m'audio'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mown_file\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mnotebook\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'google.colab'" ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" } ], "source": [ @@ -314,7 +272,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "ctw4nLaPYnp_" @@ -326,7 +283,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "x9AQgDzFYnp_" @@ -398,7 +354,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "5MclWK2GYnp_" @@ -417,7 +372,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": { "colab": { "base_uri": "https://localhost:8080/", @@ -444,12 +399,18 @@ }, "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Login successful\n", - "Your token has been saved to /root/.huggingface/token\n" - ] + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "6e56329c30c0441c8d45df3975e75a76", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(HTML(value='
" + "" ] }, - "execution_count": 8, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } @@ -725,7 +572,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "DLhErS6wYnqB" @@ -738,7 +584,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 35, "metadata": { "id": "vNHQRTUIYnqB" }, @@ -751,7 +597,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -764,7 +610,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "diarization error rate = 19.2%\n" + "diarization error rate = 19.8%\n" ] } ], @@ -773,7 +619,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "Xz5QJV9nYnqB" @@ -786,7 +631,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 19, "metadata": { "colab": { "base_uri": "https://localhost:8080/", @@ -798,12 +643,12 @@ "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nO3de5AV5Z038N/AXMAZZkBwBjBcjILoiooh5RLdjUYDWFS8ZF9NjKISy42E9baJL2tF0JiViJbFurqlJaWuMUG34rKJQY1Ro64ogaASNbKIBDAqIxFkuA/K9PuHL2czAWRmOIdznuHzqTpVTPczTz99+vSve/rL6S7LsiwLAAAAAACAhHUp9gAAAAAAAAD2lsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsADAAAAAABInsBjFy666KIoKyvb6fXWW2/tdt7YsWNzvz948OBdtrnppptybd5+++0YN25cHHDAAVFfXx9XX311fPzxx7n5q1atim984xsxdOjQ6NKlS1x55ZU7jXP27NkxcuTI6NmzZ1RXV8exxx4bDzzwQGHfnMSlsm0jItatWxeTJk2Kfv36RVVVVQwdOjQee+yxwr05Cdux7S699NKd5k2aNCnKysrioosuatW2VLbzDg899FCUlZXFmWeeuZfvBgAAAADsn8qLsdAPN23bp8vrVV3Z7t8ZO3Zs3Hfffa2mHXTQQbudV1VV1ernG264IS655JJW03r06BEREdu3b49x48ZF375948UXX4xVq1bFBRdcEBUVFTFt2rSIiGhubo6DDjoorr322pgxY8Yux3jggQfG9773vRg2bFhUVlbGnDlzYsKECVFfXx9jxoxp9zrvrabmpn26vLqqug79Xgrbdtu2bfHlL3856uvr4+GHH46DDz44Vq5cGT179uzQOu+tLU1b9+nyutd1a/fvDBgwIB566KGYMWNGdO/ePSIitm7dGrNmzYqBAwe2alsq23mHFStWxHe/+934m7/5m3avNwAAAADwiaIEHqfd/Mw+Xd5vvt/+i/9VVVXRt2/fds/boUePHrtt86tf/SreeOONeOqpp6KhoSGOPfbY+MEPfhCTJ0+O66+/PiorK2Pw4MFx2223RUTEvffeu8t+TjrppFY/X3HFFXH//ffH3LlzixJ4jH/8G/t0eY+c+WiHfi+FbXvvvffG2rVr48UXX4yKioqI+ORbB8Xyowse3qfL+9bPz2/37xx33HGxbNmymD17dpx33nkR8cm3oAYOHBiHHHJIq7alsp0jPglPzjvvvPj+978fzz//fKxbt649qw0AAAAA/H9uaVUE8+bNi+HDh0dDQ0Nu2pgxY2L9+vXx+9//vkN9ZlkWTz/9dCxZsiT+9m//Nl9DpZ3ytW0feeSRGDVqVEyaNCkaGhriqKOOimnTpsX27dsLMexO45vf/Garb27ce++9MWHChLwvJ5/78A033BD19fVx8cUX53uYAAAAALBfEXjsxpw5c6Kmpib3Ovvss3c7r6amJncbmx0mT568U5vnn38+IiIaGxtbXSiNiNzPjY2N7RpnU1NT1NTURGVlZYwbNy5uv/32+PKXv9yRVd5vpLBt//CHP8TDDz8c27dvj8ceeyymTJkSt956a/zzP/9zR1d7v3D++efH3LlzY+XKlbFy5cp44YUX4vzzd/62SKls57lz58Y999wTM2fObO+qAgAAAAB/oSi3tErBySefHHfeeWfu5+rq6t3Oi/jkeRp/7uqrr849JHmHgw8+OO/j7NGjRyxatCg2btwYTz/9dPzjP/5jfPazn93pdlf8rxS2bUtLS9TX18fdd98dXbt2jc997nPx7rvvxi233BLXXXddXpfVmRx00EExbty4+Pd///fIsizGjRsXffr02aldKWznDRs2xPjx42PmzJm7HCMAAAAA0D5FCTwe/78nF2Ox7VJdXR2HHXZYu+ft0KdPn9226du3byxYsKDVtPfffz83rz26dOmSW86xxx4bixcvjh/+8IdFCTweOG3WPl9mR6Swbfv16xcVFRXRtWvX3LQjjjgiGhsbY9u2bVFZWdnmvvLhgh/9n326vL3xzW9+M/7hH/4hIiL+7d/+bZdtSmE7L1u2LFasWBFf+cpXctNaWloiIqK8vDyWLFkShx56aJv6AgAAAACKFHj0qt63F2tLzahRo+LGG2+M1atXR319fUREPPnkk1FbWxtHHnnkXvXd0tISzc3N+Rhmu9VV1RVluaUkX9v2hBNOiFmzZkVLS0t06fLJnefefPPN6Nev3z4POyIiutd12+fL7KixY8fGtm3boqysLMaMGVOQZeRjOw8bNixee+21VtOuvfba2LBhQ9x2220xYMCAvI8bAAAAADozt7TqgObm5p3u019eXt7qtjQbNmzYqc0BB
xwQtbW1MXr06DjyyCNj/PjxcfPNN0djY2Nce+21MWnSpKiqqsq1X7RoUUREbNy4Mf70pz/FokWLorKyMndB9Yc//GGMHDkyDj300Ghubo7HHnssHnjggZ1u1UPblcq2nThxYtxxxx1xxRVXxGWXXRZLly6NadOmxeWXX16oVe80unbtGosXL879e1dKYTt369YtjjrqqFb99+zZMyJip+kAAAAAwJ4JPDrgl7/8ZfTr16/VtMMPPzz+53/+J/fz1KlTY+rUqa3afOtb34q77rorunbtGnPmzImJEyfGqFGjorq6Oi688MK44YYbWrUfMWJE7t8vvfRSzJo1KwYNGhQrVqyIiIhNmzbFt7/97XjnnXeie/fuMWzYsPjxj38cX/va1/K8xvuPUtm2AwYMiCeeeCKuuuqqOProo+Pggw+OK664IiZPnpznNe6camtrP3V+qWxnAAAAACB/yrIsy4o9CAAAAAAAgL3RpdgDAAAAAAAA2FsCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHnlheq4paUl3nvvvejRo0eUlZUVajEAAAAAAEACsiyLDRs2RP/+/aNLl/x/H6Nggcd7770XAwYMKFT3AAAAAABAgv74xz/GZz7zmbz3W7DAo0ePHhHxycBra2sLtRgAAAAAACAB69evjwEDBuTyg3wrWOCx4zZWtbW1Ag8AAAAAACAiomCPwfDQcgAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkCDwAAAAAAIHkFDzw2rd1c6EVAydi0dnMsfPB3e/W5X7t1bcxa/JNYu3VtHkcGHeczSWfUlnqdj5peSjat3RxP3vdMTH/wlnhn1bu7bWefp1hK/bNX6uPrjPbFe17q27XUxwewvynVulyq49pbnXW9KKyCBx5b1m0t9CKgZGz+cEu89NBrsfnDLR3u48Ota+OhJbPiQ8WcEuEzSWfUlnqdj5peSjZ/uCVeef61eKH7s9G4pnG37ezzFEupf/ZKfXyd0b54z0t9u5b6+AD2N6Val0t1XHurs64XheWWVgAAAAAAQPIEHgAAAAAAQPIEHgAAAAAAQPLKC72A5k3bYkuT53iwf2jeuC1vfW3ctjGampvy1h901MZtG4s9BCiY5o27P0/JZ00vNZs+3rTbY4x9nmIr1XMg+0bxFPIzkcp2LdX9AmB/U+rHjc52vCj195vSVPDA44kbn4vuFd0LvRjodKa8+L1iDwGg03t06tPFHkJR3LpiesSKYo8Cds05EH/JZ8J7AEDbOF6AW1oBAAAAAACdgMADAAAAAABInsADAAAAAABIXsGf4THme1+MwUcNLPRioCSsWfFh3u4H/4Mv3BiD6w7JS1+wN1Y0LXcfUDqtcTecEr0H99rlvHzW9FLzncGT49gjjtnlPPs8xVaq50D2jeIp5Gcile1aqvsFwP6m1I8bne14UervN6Wp4IFHVXVldK/rVujFQEmoqqnMW181lTVRV1WXt/6go2oqa4o9BCiYqprdn6fks6aXmury6t0eY+zzFFupngPZN4qnkJ+JVLZrqe4XAPubUj9udLbjRam/35Qmt7QCAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACS1/X666+/vhAdNzc3x0033RT/9E//FD16esAM+4+K7uXR/6iGqOxe0eE+unXtHsMPOjq6l3fP48ig43wm6YzaUq/zUdNLycdbPoqKqIwvHDkqanvU7radfZ5iKfXPXqmPrzPaF+95qW/XUh8fwP6mVOtyqY5rb3XW9dqf7cgNrrnmmqiqqsp7/2VZlmV57zUi1q9fH3V1ddHU1BS1tbv/gxoAAAAAAOj8Cp0buKUVAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIEHAAAAAACQPIFHotZuXRuzFv8k1m5d2ymWA3TcBxuaY+Yzb8UHG5qLPRQAOrHUjzepj5/S5HMFAB3jGLr/WlPgbS7wSNSHW9fGQ0tmxYcFDiL21XKAjvtgQ3Pc8+wyJwkAFFTqx5vUx09p8rkCgI5xDN1/rdko8AAAAAAAAPhUAg8AAAAAACB55cUeAHtn47aN0dTcVND+gTRs2PJRfLhpW7GHAUAntWHLR8UeQl44XpJPnWW/AIBicW62/9mw5eOC9i/wSNyUF79X7CEAJeKyHy0s9hAAoOQ5XgIAlA7nZvufj5s3FbR/t7QCAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACS5xkeifvBF26MwXWHFKz/FU3LPScEEnH7BSPjsL49ij0MADqptxo3dIp7LDtekk+dZb8AgGJxbrb/WbT0vfjS9ML1L/BIXE1lTdRV1RW0fyANPbpXRK/qymIPA4BOqkf3imIPIS8cL8mnzrJfAECxODfb//ToXthIwi2tAAAAAACA5Ak8AAAAAACA5Ak8AAAAAACA5Ak8AAAAAACA5Ak8EtWr24Hx9cO/Eb26HdgplgN0XJ8eVXHxSYdGnx5VxR4KAJ1Y6seb1MdPafK5AoCOcQzdf/WuKew2L8uyLCtEx+vXr4+6urpoamqK2traQiwCAAAAAABIRKFzA9/wAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAA
AAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkifwAAAAAAAAkldeqI6zLIuIiPXr1xdqEQAAAAAAQCJ25AU78oN8K1jgsWbNmoiIGDBgQKEWAQAAAAAAJGbNmjVRV1eX934LFngceOCBERHx9ttvF2TgQBrWr18fAwYMiD/+8Y9RW1tb7OEARaAOAOoAEKEWAOoAENHU1BQDBw7M5Qf5VrDAo0uXTx4PUldXp4ABUVtbqxbAfk4dANQBIEItANQB4H/zg7z3W5BeAQAAAAAA9iGBBwAAAAAAkLyu119//fUF67xr1zjppJOivLxgd84CEqAWAOoAoA4AEWoBoA4Aha0DZVmWZXnvFQAAAAAAYB9ySysAAAAAACB5Ag8AAAAAACB5Ag8AAAAAACB5Ag8AAAAAACB57Q483n333Tj//POjd+/e0b179xg+fHgsXLgwNz/Lspg6dWr069cvunfvHqeeemosXbq0VR9r166N8847L2pra6Nnz55x8cUXx8aNG/d+bYB9Yk91YPbs2TF69Ojo3bt3lJWVxaJFi3bqY+vWrTFp0qTo3bt31NTUxN/93d/F+++/vy9XA9hLn1YLPvroo5g8eXIMHz48qquro3///nHBBRfEe++916oP5wSQtj2dE1x//fUxbNiwqK6ujl69esWpp54a8+fPb9WHOgBp21Md+HOXXnpplJWVxb/8y7+0mq4OQPr2VAsuuuiiKCsra/UaO3Zsqz7UAkhbW84JFi9eHKeffnrU1dVFdXV1fP7zn4+33347Nz8f1wvbFXh8+OGHccIJJ0RFRUU8/vjj8cYbb8Stt94avXr1yrW5+eab41//9V/jrrvuivnz50d1dXWMGTMmtm7dmmtz3nnnxe9///t48sknY86cOfHf//3f8fd///ftGjhQHG2pA5s2bYoTTzwxpk+fvtt+rrrqqvjFL34RP/3pT+O5556L9957L7761a/ui1UA8mBPtWDz5s3x8ssvx5QpU+Lll1+O2bNnx5IlS+L0009v1Y9zAkhXW84Jhg4dGnfccUe89tprMXfu3Bg8eHCMHj06/vSnP+XaqAOQrrbUgR3+67/+K37zm99E//79d5qnDkDa2loLxo4dG6tWrcq9HnzwwVbz1QJIV1vqwLJly+LEE0+MYcOGxbPPPhuvvvpqTJkyJbp165Zrk5frhVk7TJ48OTvxxBN3O7+lpSXr27dvdsstt+SmrVu3LquqqsoefPDBLMuy7I033sgiIvvtb3+ba/P4449nZWVl2bvvvtue4QBFsKc68OeWL1+eRUT2yiuvtJq+bt26rKKiIvvpT3+am7Z48eIsIrJ58+bldbxAYbSnFuywYMGCLCKylStXZlnmnABS15E60NTUlEVE9tRTT2VZpg5A6tpaB955553s4IMPzl5//fVs0KBB2YwZM3Lz1AFIX1tqwYUXXpidccYZu52vFkDa2lIHvva1r2Xnn3/+bufn63phu77h8cgjj8TIkSPj7LPPjvr6+hgxYkTMnDkzN3/58uXR2NgYp556am5aXV1dHH/88TFv3ryIiJg3b1707NkzRo4cmWtz6qmnRpcuXXb6ejtQevZUB9ripZdeio8++qhVrRg2bFgMHDgwVyuA0taRWtDU1BRlZWXRs2fPiHBOAKlrbx3Ytm1b3H333VFXVxfHHHNMRKgDkLq21IGWlpYYP358XH311fFXf/VXO/WhDkD62npO8Oyzz0Z9fX0cfvjhMXHixFizZk1unloAadtTHWhpaYlHH300hg4dGmPGjIn6+vo4/vjj42c/+1muTb6uF7Yr8PjDH/4Qd955ZwwZMiSeeOKJmDhxYlx++eVx//33R0REY2NjREQ0NDS0+r2GhobcvMbGxqivr281v7y8PA488MBcG6B07akOtEVjY2NUVlbmLnru8Oe1Aiht7a0FW7dujcmTJ8e5554btbW1EeGcAFLX1jowZ86cqKmpiW7dusWMGTPiySefjD59+kSEOgCpa0sdmD59epSXl8fll1++yz7UAUhfW2rB2LFj40c/+lE8/fTTMX369HjuuefitNNOi+3bt0eEWgCp21MdWL16dWzcuDFuuummGDt2bPzqV7+Ks846K7761a/Gc889FxH5u15Y3p6Bt7S0xMiRI2PatGkRETFixIh4/fXX46677ooLL7ywPV0BiVIHgIj21YKPPvoozjnnnMiyLO68885iDBcogLbWgZNPPjkWLVoUH3zwQcycOTPOOeecmD9//k4XNYD07KkOvPTSS3HbbbfFyy+/HGVlZUUeLVAobTkn+PrXv55rP3z48Dj66KPj0EMPjWeffTZOOeWUoowbyJ891YGWlpaIiDjjjDPiqquuioiIY489Nl588cW466674otf/GLextKub3j069cvjjzyyFbTjjjiiNyT1Pv27RsRsdOT099///3cvL59+8bq1atbzf/4449j7dq1uTZA6dpTHWiLvn37xrZt22LdunWtpv95rQBKW1trwY6wY+XKlfHkk0/mvt0R4ZwAUtfWOlBdXR2HHXZY/PVf/3Xcc889UV5eHvfcc09EqAOQuj3Vgeeffz5Wr14dAwcOjPLy8igvL4+VK1fGd77znRg8eHBEqAPQGXTkOsFnP/vZ6NOnT7z11lsRoRZA6vZUB/r06RPl5eV7zBbycb2wXYHHCSecEEuWLGk17c0334xBgwZFRMQhhxwSffv2jaeffjo3f/369TF//vwYNWpURESMGjUq1q1bFy+99FKuza9//etoaWmJ448/vj3DAYpgT3WgLT73uc9FRUVFq1qxZMmSePvtt3O1AihtbakFO8KOpUuXxlNPPRW9e/du1d45AaSto+cELS0t0dzcHBHqAKRuT3Vg/Pjx8eqrr8aiRYtyr/79+8fVV18dTzzxRESoA9AZdOSc4J133ok1a9ZEv379IkItgNTtqQ5UVlbG5z//+U9tk7frhW1+vHmWZQsWLMjKy8uzG2+8MVu6dGn2k5/8JDvggAOyH//4x7k2N910U9azZ8/s5z//efbqq69mZ5xxRnbIIYdkW7ZsybUZO3ZsNmLEiGz+/PnZ3LlzsyFDhmTnnntue4YCFElb6sCaNWuyV155JXv00UeziMgeeuih7JVXXslWrVqVa3PppZdmAwcOzH79619nCxcuzEaNGpWNGjWqGKsEdMCeasG2bduy008/PfvMZz6TLVq0KFu1alXu1dzcnOvHOQGka091YOPGjdk111yTzZs3L1uxYkW2cOHCbMKECVlVVVX2+uuv5/pRByBdbfnb4C8NGjQ
omzFjRqtp6gCkbU+1YMOGDdl3v/vdbN68edny5cuzp556KjvuuOOyIUOGZFu3bs31oxZAutpyTjB79uysoqIiu/vuu7OlS5dmt99+e9a1a9fs+eefz7XJx/XCdgUeWZZlv/jFL7Kjjjoqq6qqyoYNG5bdfffdrea3tLRkU6ZMyRoaGrKqqqrslFNOyZYsWdKqzZo1a7Jzzz03q6mpyWpra7MJEyZkGzZsaO9QgCLZUx247777sojY6XXdddfl2mzZsiX79re/nfXq1Ss74IADsrPOOqtVIAKUvk+rBcuXL99lHYiI7Jlnnsm1c04Aafu0OrBly5bsrLPOyvr3759VVlZm/fr1y04//fRswYIFrfpQByBte/rb4C/tKvBQByB9n1YLNm/enI0ePTo76KCDsoqKimzQoEHZJZdckjU2NrbqQy2AtLXlnOCee+7JDjvssKxbt27ZMccck/3sZz9rNT8f1wvLsizL2vHtFAAAAAAAgJLTrmd4AAAAAAAAlCKBBwAAAAAAkDyBBwAAAAAAkDyBBwAAAAAAkDyBBwAAAAAAkDyBBwAAAAAAkDyBBwAAAAAAkDyBBwAAsNcuuuiiOPPMM4s9DAAAYD9WXuwBAAAApa2srOxT51933XVx2223RZZl+2hEAAAAOxN4AAAAn2rVqlW5f//Hf/xHTJ06NZYsWZKbVlNTEzU1NcUYGgAAQI5bWgEAAJ+qb9++uVddXV2UlZW1mlZTU7PTLa1OOumkuOyyy+LKK6+MXr16RUNDQ8ycOTM2bdoUEyZMiB49esRhhx0Wjz/+eKtlvf7663HaaadFTU1NNDQ0xPjx4+ODDz7Y16sMAAAkSOABAAAUxP333x99+vSJBQsWxGWXXRYTJ06Ms88+O77whS/Eyy+/HKNHj47x48fH5s2bIyJi3bp18aUvfSlGjBgRCxcujF/+8pfx/vvvxznnnFPkNQEAAFIg8AAAAArimGOOiWuvvTaGDBkS11xzTXTr1i369OkTl1xySQwZMiSmTp0aa9asiVdffTUiIu64444YMWJETJs2LYYNGxYjRoyIe++9N5555pl48803i7w2AABAqfMMDwAAoCCOPvro3L+7du0avXv3juHDh+emNTQ0RETE6tWrIyLid7/7XTzzzDO7fB7IsmXLYujQoQUeMQAAkDKBBwAAUBAVFRWtfi4rK2s1raysLCIiWlpaIiJi48aN8ZWvfCWmT5++U1/9+vUr4EgBAIDOQOABAACUhOOOOy7+8z//MwYPHhzl5f5UAQAA2sczPAAAgJIwadKkWLt2bZx77rnx29/+NpYtWxZPPPFETJgwIbZv317s4QEAACVO4AEAAJSE/v37xwsvvBDbt2+P0aNHx/Dhw+PKK6+Mnj17Rpcu/nQBAAA+XVmWZVmxBwEAAAAAALA3/DcpAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgedWHukUAAABXSURBVAIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgef8P+AlOrStvWy0AAAAASUVORK5CYII=", + "image/png": "iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAqrUlEQVR4nO3de5hVdb0/8M/AMMPcuclNbpYBIl7hd5LoydRUvARpJikiRik8J7Urx0vmJbPUE2JWWiqCFkWmQJZFXhIEQVSEE6YHUVFQQRRwYIDhNuv3hw/7OA4wMzC3Nb5ezzPPM7PWd3+/373X2p+1Z7/3XisrSZIkAAAAAAAAUqxFY08AAAAAAABgfwk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB67ccEFF0RWVlaVn1deeWWP64YMGZK5fa9evXbb5sYbb8y0WbFiRXzxi1+MgoKC6NChQ1x66aWxbdu2zPry8vK44IIL4rDDDovs7Oz40pe+VGWec+fOjcGDB0f79u0jLy8v+vbtGxMmTKjXxybt0rJtIyK2bt0aP/jBD6Jnz56Rm5sbn/zkJ+Oee+6pt8cm7XZtv7Fjx1ZZ95//+Z+RlZUVF1xwQaW2TWVb7/LUU09FdnZ2HHnkkfv1WDR3db2tIyLmzZsXp556arRt2zZat24dhx12WIwfPz527txZqd369etj5MiRUVJSEiUlJTFy5Mh4//33K7X51re+FQMGDIjc3NzdbsvXX399t3OaOXPmfj0uzdmaNWtizJgx0aNHj8jNzY3OnTvHySefHPPnz4+Iys/X/Pz86N+/f/zmN7/J3H7y5Mm7fcxbt25dZax58+ZFy5Ytq+wjEf+37RYvXpxZtnHjxvj85z8fffv2jZUrV0ZE7HasrKysmDp1akREzJo1q9Ly9u3bx/HHHx9PPfVUrR6XBx98MPr16xe5ubnRr1+/mD59eqX1d9xxRxx++OFRXFwcxcXFMWjQoPj73/9eqzEAAAAgTbIbY9D1m7ZV36gOtS3IqfVthgwZEpMmTaq07IADDtjjutzc3Ep//+hHP4oLL7yw0rKioqKIiNi5c2ecdtppccABB8TcuXNj7dq1MWrUqEiSJH7xi19k2uTl5cWll14aDz744G7nWFBQEBdffHEcfvjhUVBQEHPnzo0xY8ZEQUFBXHTRRbW+z3WhdGtpg41VkluyT7dLw7aNiDj77LPjnXfeiYkTJ8bBBx8ca9asiR07duzTfd5fW0rLG3S8vJKqb0LWRPfu3WPq1KkxYcKEyMvLi4gPQoc//OEP0aNHj0ptm9K2jogoLS2N888/P0444YR45513an/n68jOtWsbdLyW7dvv0+3qcltPnz49zj777Pja174WTzzxRLRp0yYee+yx+K//+q94+umn4/7774+srKyIiDj33HPjzTffzIQTF110UYwcOTL+8pe/ZPpLkiRGjx4dCxYsiH/96197vA+PPfZYHHrooZm/27Vrt0+Pxf7asmVLg463a3vVxpe//OXYvn173HvvvfGJT3wi3nnnnXj88cdj3bp1mTa7nq9lZWUxefLkGDt2bLRp0yaGDx8eERHFxcWxdOnSSv3u2q4fds8998Qll1wSd999d6xYsaLK/vRh7777bpxyyikR8cGHEDp06JBZN2nSpCqhSZs2bSr9vXTp0iguLo533303fvzjH8dpp50WL7/8cnTs2LHax2T+/PkxfPjwuP766+OMM87I7Mdz586NT3/60xER0a1bt7jxxhvj4IMPjoiIe++9N4YNGxa
LFi2qtO8BAABAc9EogccpNz/RoOM9fd3Jtb7Nrk+Q1nbdLkVFRXts88gjj8SLL74YK1eujK5du0ZExPjx4+OCCy6IG264IYqLi6OgoCDuuOOOiPjgU98f/QRxRMRRRx0VRx11VObvXr16xbRp02LOnDmNFniM/Pu5DTbWQ196eJ9ul4ZtO3PmzJg9e3a89tprmTdBe/XqVcN7WPfuO/+BBh1vzJ/P26fbHX300fHaa6/FtGnTYsSIERERMW3atOjevXt84hOfqNS2qWzrXcaMGRPnnntutGzZMmbMmFHDe1z3Vh9+ZIOOd+BbK/fpdnW1rTdt2hQXXnhhDB06NO68887M8m984xvRqVOnGDp0aNx///0xfPjweOmll2LmzJnx9NNPZ95Qvuuuu2LQoEGxdOnS6NOnT0RE3HbbbRHxwZvhews82rdvX+0+2BB++9vfNuh4tT0+vf/++zF37tyYNWtWHHvssRER0bNnz/iP//iPSu0+/Hz98Y9/HPfff3/MmDEjE3hkZWVV+3hv2rQp7r///nj22Wdj9erVMXny5Lj66qt323blypVx4oknRpcuXeKhhx7KhKG7tGnTptrxOnbsmGl31VVXxf333x8LFiyIL37xi3u9XUTErbfeGieeeGJcccUVERFxxRVXxOzZs+PWW2+NP/zhDxERVfq54YYb4o477oinn35a4AEAAECz5JRWjWD+/PnRv3//zJukEREnn3xybN26NRYuXLjP/S5atCjmzZuXeUOIhldX2/ahhx6KgQMHxs033xwHHnhg9O7dO77//e83+Cex0+hrX/tapU/z33PPPTF69Og6H6cun8eTJk2KV199Na655pq6nmazVhfb+pFHHom1a9fG97///SrrvvjFL0bv3r0zbx7Pnz8/SkpKMmFHRMQxxxwTJSUlMW/evFrPf+jQodGxY8cYPHhwPPBAw4aKaVJYWBiFhYUxY8aM2Lp1a41v17p169i+fXutxvrjH/8Yffr0iT59+sR5550XkyZNiiRJqrRbunRpDB48OPr27RszZ86sEnbU1ubNmzP7cqtWrWp0m/nz58dJJ51UadnJJ5+8x31x586dMXXq1Ni0aVMMGjRov+YLAAAATZXAYw/++te/Zt5kKSwsjK985St7XFdYWBjXX399pdtfdtllVdrMmjUrIiJWr14dnTp1qtS+bdu2kZOTE6tXr671XLt16xa5ubkxcODA+OY3vxnf+MY3an+HP0bSsG1fe+21mDt3brzwwgsxffr0uPXWW+OBBx6Ib37zm/t+xz8mRo4cGXPnzo3XX3893njjjXjqqafivPOqfmOkqWzrZcuWxeWXXx5TpkyJ7OxG+dJdatXFtn755ZcjIuKQQw7Z7Rh9+/bNtFm9evVuTzXUsWPHWm3zwsLCuOWWW+KBBx6Iv/3tb3HCCSfE8OHD43e/+12N+/g4yc7OjsmTJ8e9994bbdq0icGDB8eVV165x2/P7NixIyZPnhxLliyJE044IbO8tLS0yn7w0cBg4sSJmX1oyJAhUVZWFo8//niVMc4///z45Cc/GQ8++GCVU+Htcs4551QZ77XXXqvUplu3bpl1EyZMiAEDBlSa897srgZ16tSpyr64ZMmSKCwsjNzc3Bg7dmxMnz49+vXrV6MxAAAAIG28u7YHxx13XOZUNBEfXC9jT+siqp57fdy4cZmL5u5y4IEHZn7f3XnDkyTZ7fLqzJkzJ8rKyuLpp5+Oyy+/PA4++OA455xzat3Px0Uatm1FRUVkZWXFlClToqTkg2uV3HLLLXHWWWfFr371q306B/7HRYcOHeK0006Le++9N5IkidNOO63SefV3aQrbeufOnXHuuefGddddF717967Rbfg/dbmtd/cp/l3LP7w96+L53aFDh/jOd76T+XvgwIGxfv36uPnmm3cb2PDBNTxOO+20mDNnTsyfPz9mzpwZN998c9x9992Z5+hll10WV111VWzdujVycnJi3LhxMWbMmEwfRUVF8fzzz1fq98O1dOnSpfHMM8/EtGnTIuKDoGX48OFxzz33xBe+8IVKtxs2bFhMnz49HnzwwTj77LN3O+cJEyZUuV337t0r/T1nzpwoKCiIRYsWxWWXXRaTJ0+u8Tc8Iqruj7vbF/v06ROLFy+O999/Px588MEYNWpUzJ49W+gBAABAs9Qogcff/+u4xhi2VgoKCjIX+azNul06dOiwxzadO3eOBQsWVFq2fv362L59e5VPa9bEQQcdFBERhx12WLzzzjtx7bXXNlrg8dtTft8o49ZGGrZtly5d4sADD8yEHREffAI9SZJ4880341Of+lSN+6oL5993VoOOt79Gjx4dF198cURE/OpXv9ptm6awrTdu3BjPPfdcLFq0KDPfioqKSJIksrOz45FHHonjjz++Rn3Vlc7/Wtyg4+2v/d3Wu4Kml156KT7zmc9UWf+///u/mTeGO3fuvNsLyr/77rv7VLs/7Jhjjom77757v/rYVyNHjmyUcWurdevWceKJJ8aJJ54YV199dXzjG9+Ia665JhN47Aoo8/Pzo0uXLlXe+G/RosVen/MTJ06MHTt2VAo1kySJVq1axfr166Nt27aZ5VdeeWUcfvjhMWLEiEiSJHOdkA/r3LlztTXmoIMOijZt2kTv3r2jvLw8zjjjjHjhhRf2+K2Rj/b/0W9zrFmzpsq+mJOTk5nHwIED49lnn42f//zn8Zvf/KbaMQAAACBtGiXwaFuQ0xjDNhmDBg2KG264IVatWhVdunSJiA/OI5+bmxsDBgzYr76TJKnVOc7rWkluSfWNmrG62raDBw+OP/3pT1FWVhaFhYUR8cGpd1q0aBHdunWrl7nvTV5J6wYfc38MGTIktm3bFhEfnNO+PtTFti4uLo4lS5ZUWnb77bfHP//5z3jggQcyYWZDatm+fYOPuT/2d1ufdNJJ0a5duxg/fnyVwOOhhx6KZcuWZU5/NWjQoCgtLY1nnnkmc8HsBQsWRGlp6W7DktpYtGhRZj9qaGn9xli/fv1ixowZmb/3FlBWZ8eOHXHffffF+PHjq5zm6stf/nJMmTIlE6ztctVVV0V2dnaMGDEiKioq9vuDBiNHjowf/ehHcfvtt1f6BtCeDBo0KB599NFKbR955JFq98XGfp0AAAAA9ckprfbB1q1bq3yqMjs7u9KpVDZu3FilTX5+fhQXF8dJJ50U/fr1i5EjR8Z///d/x7p16+L73/9+XHjhhVFcXJxp/+KLL8a2bdti3bp1sXHjxli8eHFERBx55JER8cGnmXv06BF9+/aNiIi5c+fGz372s7jkkkvq4V5/PDSVbXvuuefG9ddfH1/72tfiuuuui/feey/GjRsXo0ePTu2bkw2pZcuW8dJLL2V+352msK1btGgR/fv3r9R/x44do3Xr1lWWs3v7u60LCgriN7/5TXz1q1+Niy66KC6++OIoLi6Oxx9/PMaNGxdnnXVW5pRFhxxySAwZMiQuvPDCzKfjL7roojj99NOjT58+mb5feeWVKCsri9WrV8eWLVsy27xfv36Rk5MT9957b7Rq1SqOOuqoaN
GiRfzlL3+J2267LW666aa6fniahbVr18ZXvvKVGD16dBx++OFRVFQUzz33XNx8880xbNiwGveTJMlur7XSsWPH+Otf/xrr16+Pr3/965W+WRcRcdZZZ8XEiROrBB4REZdffnm0bNkyRo4cGRUVFTFixIjMuvfff7/KeEVFRZVOo/hhLVq0iG9/+9vx4x//OMaMGRP5+fl7vT/f+ta34nOf+1zcdNNNMWzYsPjzn/8cjz32WMydOzfT5sorr4xTTjklunfvHhs3boypU6fGrFmzYubMmXvtGwAAAFIroYpRo0Ylw4YN2+O6iKjy06dPn0ybnj177rbNmDFjMm3eeOON5LTTTkvy8vKSdu3aJRdffHFSXl5eaaw99bPLbbfdlhx66KFJfn5+UlxcnBx11FHJ7bffnuzcubNuH5BmJC3bNkmS5KWXXkq+8IUvJHl5eUm3bt2S7373u8nmzZvr7sFoZva2bZMkSYYNG5aMGjUq07YpbesPu+aaa5Ijjjhinx6Dj4u63tZJkiRPPvlkMmTIkKSkpCTJyclJ+vXrl/zsZz9LduzYUand2rVrkxEjRiRFRUVJUVFRMmLEiGT9+vWV2hx77LG7HXP58uVJkiTJ5MmTk0MOOSTJz89PioqKkgEDBiS//e1v9/dhabbKy8uTyy+/PDn66KOTkpKSJD8/P+nTp09y1VVXZWpiz549kwkTJuyxj0mTJu12m0REsmrVquT0009PTj311N3eduHChUlEJAsXLkyWL1+eRESyaNGiSm3Gjx+ftGzZMrnvvvuSJEn2ONZPf/rTJEmS5Iknnkgiosq+U1ZWlrRt2za56aabavTY/OlPf0r69OmTtGrVKunbt2/y4IMPVlo/evTopGfPnklOTk5ywAEHJCeccELyyCOP1KhvAAAASKOsJNnDlVoBAAAAAABSokVjTwAAAAAAAGB/CTwAAJqYwsLCPf7MmTOnsacHAAAATZJTWgEANDGvvPLKHtcdeOCBkZeX14CzAQAAgHQQeAAAAAAAAKnnlFYAAAAAAEDqCTwAAAAAAIDUy66vjisqKuLtt9+OoqKiyMrKqq9hAAAAAACAFEiSJDZu3Bhdu3aNFi3q/vsY9RZ4vP3229G9e/f66h4AAAAAAEihlStXRrdu3eq833oLPIqKiiLig4kXFxfX1zAAAAAAAEAKbNiwIbp3757JD+pavQUeu05jVVxcLPAAAAAAAAAiIurtMhguWg4AAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUq/eA49N6zbX9xDQIDat2xzP/eF/GmSfXle+Ln7/0pRYV76u3seCmrJf0tx8eJ9eu2JpzPnuqJh13bXx729eHBueey42jL8ldr7zTpXbbd68OZ577rnYvHlzpd/ToiZzTuP9onmo72ONY1k61WS7Nedt25zvG0BTl6YanJa5vvLe6rj0/unxynurG3sqNJK1G7fWa//1Hnhseb+8voeABrF5/ZZYOHVJbF6/pd7HWl++LqYu/X2sb+IHKT5e7Jc0Nx/ep0vffDXaz34xVkdWtJnx59jy4oux8ZYJsXPNmiq327x5czz//POZwGPX72lRkzmn8X7RPNT3scaxLJ1qst2a87ZtzvcNoKlLUw1Oy1xfX7s2nvl3fry+dm1jT4VGsrYs5YEHAAAAAABAfRN4AAAAAAAAqSfwAAAAAAAAUi+7vgfYumlbbCl1HQ/Sb2vZtgYfs2xbWZRuLW3wcWF3yraVNfYUoF6UbSuLlts3V/oUSFJW/f5eXp7u1zfl5eWxZcvur0uV9vtG+tXXayDHsnTb237xcdi2/jcAaHhpPL409ePFlh0fXCdwU/nOWL+p4d9ro/Ft3LKjXvuv98DjHzfMjrxWefU9DDRLP5z3g8aeAkCz98N5P4juKzbHNz+0bOcNP632dn/729/qb1INIO3zp3nzGojd+bjvFx/3+w9AzTT148X2zQdExDnx02mr4qexqrGnQyPYsXVTvfbvlFYAAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpV+/X8Dj5B8dGr/496nsYqHdrX18fD1/9eIOOef1nboheJQc16JiwJ6+XLm/y5wKFfXH9Z26Ilm1fiYibMsta/uCKaq/jceqpp0ZEeq+Fceqpp0b79u13u27t2rWpvV80D/X1GsixLN32tl98HLat/w0AGl4ajy9N/Xjx5Csvxw1L18UVZ3aJzx3ct7GnQyNYvOztOP6m6tvtq3oPPHILciKvpHV9DwP1Lrcwp8HHLMwpjJLckgYfF3anMKewsacA9aIwpzCyWuVH+YeWZRVWv7+3bp3u1zetW7eOvLy8Pa6DxlRfr4Ecy9Jtb/vFx2Hb+t8AoOGl8fjS1I8Xedn5EbEuClq3jLYFDf9eG42vKK9+IwmntAIAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB69X7R8rw2LnpJ85DfNi8GfPWwyG+7+wu81qW2rdvFV/ucG21bt6v3saCm7Jc0Nx/ep5Nun4y3ju0XnSOJ9780LLr36xfx3e9Ey44dq9wuPz8/jj766MjPz4+IqPR7Gnx0/vvaBupDfR9rHMvSqSbbrTlv2+Z83wCaujTV4LTMtVf79vEfh74cvdq3b+yp0EjaF+bWa/9ZSZIk9dHxhg0boqSkJEpLS6O4uLg+hgAAAAAAAFKivnMDp7QCAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AI+XWl
a+L3780JdaVr2uS/QEN472NW+OuJ16J9zZubeypANBMNYdjTXO4DzQt9ikA2HfrytfFxCV3x8Qld3svkjoj8Ei59eXrYurS38f6OioKdd0f0DDe27g1Js561T/bANSb5nCsaQ73gabFPgUA+259+br486vT48+vTvdeJHVG4AEAAAAAAKSewAMAAAAAAEi97MaeAHWjbFtZlG4trZN+gPTauGV7rN+0rbGnAUAztHHL9saeQp1xvKSuNKfnBQBAcyDwaCZ+OO8HjT0FoAm45L7nGnsKANDkOV4CAEDz5JRWAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqecaHs3E9Z+5IXqVHLTf/bxeutz1QCDFfnH+wDi4c1FjTwOAZuiV1RubzbUvHC+pK83peQEA0BwIPJqJwpzCKMktqZN+gPQqymsVbQtyGnsaADRDRXmtGnsKdcbxkrrSnJ4XAADNgVNaAQAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD0XLU+5tq3bxVf7nBttW7drkv0BDaNDUW58/fOfjA5FuY09FQCaqeZwrGkO94GmxT4FAPuubet2MeyTZ2R+h7qQlSRJUh8db9iwIUpKSqK0tDSKi4vrYwgAAAAAACAl6js3cEorAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1suur4yRJIiJiw4YN9TUEAAAAAACQErvygl35QV2rt8Bj7dq1ERHRvXv3+hoCAAAAAABImbVr10ZJSUmd91tvgUe7du0iImLFihX1MnEgHTZs2BDdu3ePlStXRnFxcWNPB2gE6gCgDgARagGgDgARpaWl0aNHj0x+UNfqLfBo0eKDy4OUlJQoYEAUFxerBfAxpw4A6gAQoRYA6gDwf/lBnfdbL70CAAAAAAA0IIEHAAAAAACQevUWeOTm5sY111wTubm59TUEkAJqAaAOAOoAEKEWAOoAUP91ICtJkqReegYAAAAAAGggTmkFAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9WodeLz11ltx3nnnRfv27SM/Pz+OPPLIWLhwYWZ9kiRx7bXXRteuXSMvLy8+//nPx7///e9KfWzdujUuueSS6NChQxQUFMTQoUPjzTff3P97AzSI6urAtGnT4uSTT44OHTpEVlZWLF68uEof6gCk395qwfbt2+Oyyy6Lww47LAoKCqJr165x/vnnx9tvv12pD7UA0q261wTXXntt9O3bNwoKCqJt27bxhS98IRYsWFCpD3UA0q26OvBhY8aMiaysrLj11lsrLVcHIP2qqwUXXHBBZGVlVfo55phjKvWhFkC61eQ1wUsvvRRDhw6NkpKSKCoqimOOOSZWrFiRWV8XdaBWgcf69etj8ODB0apVq/j73/8eL774YowfPz7atGmTaXPzzTfHLbfcEr/85S/j2Wefjc6dO8eJJ54YGzduzLT59re/HdOnT4+pU6fG3Llzo6ysLE4//fTYuXNnrSYPNLya1IFNmzbF4MGD48Ybb9xjP+oApFt1tWDz5s3x/PPPxw9/+MN4/vnnY9q0afHyyy/H0KFDK/WjFkB61eQ1Qe/eveOXv/xlLFmyJObOnRu9evWKk046Kd59991MG3UA0qsmdWCXGTNmxIIFC6Jr165V1qkDkG41rQVDhgyJVatWZX7+9re/VVqvFkB61aQOvPrqq/HZz342+vbtG7NmzYr/+Z//iR/+8IfRunXrTJs6qQNJLVx22WXJZz/72T2ur6ioSDp37pzceOONmWXl5eVJSUlJ8utf/zpJkiR5//33k1atWiVTp07NtHnrrbeSFi1aJDNnzqzNdIBGUF0d+LDly5cnEZEsWrSo0nJ1ANKvNrVgl2eeeSaJiOSNN95IkkQtgLTblzpQWlqaRETy2GOPJUmiDkDa1bQOvPnmm8mBBx6YvPDCC0nPnj2TCRMmZNapA5B+NakFo0aNSoYNG7bH9WoBpFtN6sDw4cOT8847b4/r66oO1OobHg899FAMHDgwvvKVr0THjh3jqKOOirvuuiuzfvny5bF69eo46aSTMstyc3Pj2GOPjXnz5kVExMKFC2P79u2V2nTt2jX69++faQM0XdXVgZpQByD99qUWlJaWRlZWVuYTHmoBpFtt68C2bdvizjvvjJKSkjjiiCMiQh2AtKtJHaioqIiRI0fGuHHj4tBDD63ShzoA6VfT1wSzZs2Kjh07Ru/evePCCy+MNWvWZNapBZBu1dWBioqKePjhh6N3795x8sknR8eOHePTn/50zJgxI9OmrupArQKP1157Le6444741Kc+Ff/4xz9i7Nixcemll8Z9990XERGrV6+OiIhOnTpVul2nTp0y61avXh05OTnRtm3bPbYBmq7q6kBNqAOQfrWtBeXl5XH55ZfHueeeG8XFxRGhFkDa1bQO/PWvf43CwsJo3bp1TJgwIR599NHo0KFDRKgDkHY1qQM33XRTZGdnx6WXXrrbPtQBSL+a1IJTTjklpkyZEv/85z9j/Pjx8eyzz8bxxx8fW7dujQi1ANKuujqwZs2aKCsrixtvvDGGDBkSjzzySJxxxhlx5plnxuzZsyOi7upAdm0mXlFREQMHDoyf/OQnERFx1FFHxb///e+444474vzzz8+0y8rKqnS7JEmqLPuomrQBGl9N68C+UAcgPWpTC7Zv3x5f/epXo6KiIm6//fZq+1YLIB1qWgeOO+64WLx4cbz33ntx1113xdlnnx0LFiyIjh077rFvdQDSobo6sHDhwvj5z38ezz//fK2f0+oApEdNXhMMHz48075///4xcODA6NmzZzz88MNx5pln7rFvtQDSobo6UFFRERERw4YNi+985zsREXHkkUfGvHnz4te//nUce+yxe+y7tnWgVt/w6NKlS/Tr16/SskMOOSRzJfXOnTtHRFRJXNasWZP51kfnzp1j27ZtsX79+j22AZqu6upATagDkH41rQXbt2+Ps88+O5YvXx6PPvpo5tsdEWoBpF1N60BBQUEcfPDBccwxx8TEiRMjOzs7Jk6cGBHqAKRddXVgzpw5sWbNmujRo0dkZ2dHdnZ2vPHGG/G9730v
evXqFRHqADQH+/I+QZcuXaJnz56xbNmyiFALIO2qqwMdOnSI7OzsarOFuqgDtQo8Bg8eHEuXLq207OWXX46ePXtGRMRBBx0UnTt3jkcffTSzftu2bTF79uz4zGc+ExERAwYMiFatWlVqs2rVqnjhhRcybYCmq7o6UBPqAKRfTWrBrrBj2bJl8dhjj0X79u0rtVcLIN329TVBkiSZ01eoA5Bu1dWBkSNHxr/+9a9YvHhx5qdr164xbty4+Mc//hER6gA0B/vymmDt2rWxcuXK6NKlS0SoBZB21dWBnJyc+H//7//ttU2d1YEaX948SZJnnnkmyc7OTm644YZk2bJlyZQpU5L8/Pzkd7/7XabNjTfemJSUlCTTpk1LlixZkpxzzjlJly5dkg0bNmTajB07NunWrVvy2GOPJc8//3xy/PHHJ0cccUSyY8eO2kwHaAQ1qQNr165NFi1alDz88MNJRCRTp05NFi1alKxatSrTRh2AdKuuFmzfvj0ZOnRo0q1bt2Tx4sXJqlWrMj9bt27N9KMWQHpVVwfKysqSK664Ipk/f37y+uuvJwsXLky+/vWvJ7m5uckLL7yQ6UcdgPSqyf8GH9WzZ89kwoQJlZapA5Bu1dWCjRs3Jt/73veSefPmJcuXL0+eeOKJZNCgQcmBBx7o/UJoJmrymmDatGlJq1atkjvvvDNZtmxZ8otf/CJp2bJlMmfOnEybuqgDtQo8kiRJ/vKXvyT9+/dPcnNzk759+yZ33nlnpfUVFRXJNddck3Tu3DnJzc1NPve5zyVLliyp1GbLli3JxRdfnLRr1y7Jy8tLTj/99GTFihW1nQrQSKqrA5MmTUoiosrPNddck2mjDkD67a0WLF++fLd1ICKSJ554ItNOLYB021sd2LJlS3LGGWckXbt2TXJycpIuXbokQ4cOTZ555plKfagDkG7V/W/wUbsLPNQBSL+91YLNmzcnJ510UnLAAQckrVq1Snr06JGMGjWqyvNcLYB0q8lrgokTJyYHH3xw0rp16+SII45IZsyYUWl9XdSBrCRJklp8OwUAAAAAAKDJqdU1PAAAAAAAAJoigQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAADYb9dee20ceeSRjT0NAADgYywrSZKksScBAAA0XVlZWXtdP2rUqPjlL38ZW7dujfbt2zfQrAAAACoTeAAAAHu1evXqzO9//OMf4+qrr46lS5dmluXl5UVJSUljTA0AACDDKa0AAIC96ty5c+anpKQksrKyqiz76CmtLrjggvjSl74UP/nJT6JTp07Rpk2buO6662LHjh0xbty4aNeuXXTr1i3uueeeSmO99dZbMXz48Gjbtm20b98+hg0bFq+//nrD3mEAACCVBB4AAEC9+Oc//xlvv/12PPnkk3HLLbfEtddeG6effnq0bds2FixYEGPHjo2xY8fGypUrIyJi8+bNcdxxx0VhYWE8+eSTMXfu3CgsLIwhQ4bEtm3bGvneAAAATZ3AAwAAqBft2rWL2267Lfr06ROjR4+OPn36xObNm+PKK6+MT33qU3HFFVdETk5OPPXUUxERMXXq1GjRokXcfffdcdhhh8UhhxwSkyZNihUrVsSsWbMa984AAABNXnZjTwAAAGieDj300GjR4v8+Y9WpU6fo379/5u+WLVtG+/btY82aNRERsXDhwnjllVeiqKioUj/l5eXx6quvNsykAQCA1BJ4AAAA9aJVq1aV/s7KytrtsoqKioiIqKioiAEDBsSUKVOq9HXAAQfU30QBAIBmQeABAAA0CUcffXT88Y9/jI4dO0ZxcXFjTwcAAEgZ1/AAAACahBEjRkSHDh1i2LBhMWfOnFi+fHnMnj07vvWtb8Wbb77Z2NMDAACaOIEHAADQJOTn58eTTz4ZPXr0iDPPPDMOOeSQGD16dGzZssU3PgAAgGplJUmSNPYkAAAAAAAA9odveAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACk3v8HUEYGyV77YKkAAAAASUVORK5CYII=\n", "text/plain": [ - "" + "" ] }, - "execution_count": 11, + "execution_count": 19, "metadata": {}, "output_type": "execute_result" } @@ -815,7 +660,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 20, "metadata": { "colab": { "base_uri": "https://localhost:8080/", @@ -827,12 +672,12 @@ "outputs": [ { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nO3de3RV5Zk/8CeQCzEhCRcDwXJTRLygoHQs6kztVAHrEmtntFJEpY5WB7V2quO4vNE6WmlrqaOzdGS0Y50q01bbsVbrXSvC4JVaKwuRCrZKoCYSAkJAsn9/8OPUlEsSck5Odvh81spaZO/3vPvd593nOfucL9m7IEmSJAAAAAAAAFKsR74HAAAAAAAA0FECDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYHHDpx99tlRUFCw3c9bb72103WTJk3KPH7YsGE7bHPjjTdm2rzzzjtx4oknxl577RXV1dVx2WWXxUcffZRZv3LlyvjSl74UI0eOjB49esQll1yy3TgfeOCBGDduXFRVVUVZWVmMGTMm7rnnntw+OSmXlrmNiFizZk3MmDEjampqoqSkJEaOHBkPP/xw7p6cFNs2d+eff/5262bMmBEFBQVx9tlnt2jbVeZ5m7lz50ZBQUF8/vOf7+Cz0b1le64jIubPnx+f+9znok+fPtGrV68YPXp0fO9734stW7a0aFdfXx9Tp06NioqKqKqqinPOOSfWrVuXWb9x48Y4++yzY/To0VFYWLjDuXzmmWd2OKba2tosPDsAAAAAe7bCfGz0g/WbOnV7fcqK2/2YSZMmxQ9+8IMWy/bee++drispKWnx+ze/+c0499xzWyzr3bt3RERs2bIlTjzxxBg4cGDMnz8/Vq5cGWeeeWYUFRXFDTfcEBERTU1Nsffee8dVV10Vs2fP3uEY+/btG1deeWWMGjUqiouL46GHHorp06dHdXV1TJw4sd373FENTQ2dur3Kksrdelwa5nbTpk1x/PHHR3V1dfz0pz+NffbZJ1asWBFVVVW7tc8dtaFhY6dur7SyV7sfM3jw4Jg7d27Mnj07SktLI2LrF9D33ntvDBkypEXbrjLP2yxfvjwuvfTS+Ou//ut273c2bamr69Tt9ezXb7cel825/tnPfhannXZaTJ8+PZ5++umoqqqKJ554Iv75n/85FixYED/+8Y+joKAgIiKmTp0aK1eujMcffzw2b94c06dPj/POOy/uvffeiNh6XJSWlsbFF18c999//y73YcmSJVFRUZH5vbq6ereeCwAAAAD+LC+BxwnffrpTt/d/32j/l/8lJSUxcODAdq/bpnfv3jtt89hjj8Ubb7wRTzzxRAwYMCDGjBkT1113XVx++eUxc+bMKC4ujmHDhsXNN98cERF33XXXDvs59thjW/z+1a9+Ne6+++6YN29eXgKPaY98qVO39+Dnf7lbj0vD3N51111RX18f8+fPj6KioojY+lcH+fLDM3/aqdv7yv+e0e7HHH744bFs2bJ44IEHYurUqRGx9a+ghgwZEsOHD2/RtqvMc8TWL8mnTp0a3/jGN+K5556LNWvWtGe3s6r20DGdur193v3Dbj0uW3O9fv36OPfcc2Py5Mlxxx13ZJb/wz/8QwwYMCAmT54cP/7xj+OLX/xiLF68OH71q1/Fiy++GOPGjYuIiFtuuSU+97nPxXe/+90YNGhQlJWVxW233RYREc8///wu57K6ujpvASYAAABAd+WSVnmwYMGCGD16dAwYMCCzbOLEibF27dr43e9+t1t9JkkSTz75ZCxZsiT+5m/+JltDpZ2yNbcPPvhgjB8/PmbMmBEDBgyIQw45JG644YbtLrFDS1/+8pdb/G/+u+66K6ZPn5717WTzNfzNb34zqqur45xzzsn2MLu1bMz1Y489FnV1dXHppZdut+6kk06KkSNHxn333RcRW+e8qqoqE3ZERBx33HHRo0ePWLhwYbvHP2bMmKipqYnjjz8+nn/++XY/HgAAAIDtCTx24qGHHory8vLMz6mnnrrTdeXl5ZnL2Gxz+eWXb9fmueeei4iI2traFl+URkTm9/Zex72hoSHKy8ujuLg4TjzxxLjlllvi+OOP351d3mOkYW5///vfx09/+tPYsmVLPPzww3H11VfHTTfdFP/6r/+6u7u9RzjjjDNi3rx5sWLFilixYkU8//zzccYZ2/+1SFeZ53nz5sWdd94Zc+bMae+u7vGyMddvvvlmREQceOCBO9zGqFGjMm1qa2u3u+xUYWFh9O3bt11zXlNTE7fffnvcf//9cf/998fgwYPj2GOPjVdeeaXNfQAAAACwY3m5pFUafOYzn8lcmiQioqysbKfrIrbeT+PjLrvsssyNc7fZZ599sj7O3r17x6JFi2LdunXx5JNPxj/90z/Fvvvuu93lrvizNMxtc3NzVFdXxx133BE9e/aMI444It599934zne+E9dee21Wt9Wd7L333nHiiSfGf/3Xf0WSJHHiiSdG//79t2vXFea5sbExpk2bFnPmzNnhGNm1bM51kiQ5HevHHXDAAXHAAQdkfj/qqKNi2bJlMXv27Ljnnns6bRwAAAAA3VFeAo9H/vkz+dhsu5SVlcWIESPavW6b/v3777TNwIED44UXXmixbNWqVZl17dGjR4/MdsaMGROLFy+Ob33rW3kJPO454d5O3+buSMPc1tTURFFRUfTs2TOz7MADD4za2trYtGlTFBcXt7mvbDjzh3/fqdvriC9/+ctx4YUXRkTEv//7v++wTVeY52XLlsXy5cvjpJNOyixrbm6OiK1/ObBkyZLYb7/92tRXtgx8bVGnbq+jOjrXI0eOjIiIxYsXx1FHHbXd+sWLF8dBBx0UEVvndfXq1S3Wf/TRR1FfX9/uuv2X/uqv/irmzZvXoT4AAAAAyFPg0aesc7+s7WrGjx8f119/faxevTpziZTHH388KioqMl+u7a7m5uZoamrKxjDbrbKkMi/b7UqyNbdHH3103HvvvdHc3Bw9emy98tybb74ZNTU1nR52RESUVvbq9G3urkmTJsWmTZuioKAgJk6cmJNtZGOeR40aFb/97W9bLLvqqquisbExbr755hg8eHDWx92anv36dfo2O6Kjcz1hwoTo27dv3HTTTdsFHg8++GAsXbo0rrvuuojYOudr1qyJl19+OY444oiIiHjqqaeiubk5jjzyyA7tx6JFi6KmpqZDfQAAAADgkla7pampabtrthcWFra4nEpjY+N2bfbaa6+oqKiICRMmxEEHHRTTpk2Lb3/721FbWxtXXXVVzJgxI0pKSjLtFy3a+r+t161bF3/6059i0aJFUVxcnPlC9Vvf+laMGzcu9ttvv2hqaoqHH3447rnnnu0u30LbdZW5veCCC+LWW2+Nr371q3
HRRRfF0qVL44YbboiLL744V7vebfTs2TMWL16c+feOdIV57tWrVxxyyCEt+q+qqoqI2G45O9bRuS4rK4v/+I//iNNPPz3OO++8uPDCC6OioiKefPLJuOyyy+Lv//7v47TTTouIrX9hNWnSpDj33HPj9ttvj82bN8eFF14Yp59+egwaNCjT9xtvvBGbNm2K+vr6aGxszBwDY8aMiYiI73//+zF8+PA4+OCDY+PGjfGf//mf8dRTT8Vjjz2W9ecHAAAAYI+TsJ2zzjorOfnkk3e6LiK2+znggAMybYYOHbrDNl/5ylcybZYvX56ccMIJSWlpadK/f//k61//erJ58+YW29pRH0OHDs2sv/LKK5MRI0YkvXr1Svr06ZOMHz8+mTt3bnafjG4mLXObJEkyf/785Mgjj0xKSkqSfffdN7n++uuTjz76KHtPRjeyq3lNkiQ5+eSTk7POOivTtivNc3v2g+zPdZIkya9//etk4sSJSUVFRVJcXJwcfPDByXe/+93tXm91dXXJlClTkvLy8qSioiKZPn160tjY2KLNzo6dbWbNmpXst99+Sa9evZK+ffsmxx57bPLUU0918FkBAAAAIEmSpCBJOvFurQAAAAAAADnQI98DAAAAAAAA6CiBBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKlXmKuOm5ub47333ovevXtHQUFBrjYDAAAAAACkQJIk0djYGIMGDYoePbL/9xg5Czzee++9GDx4cK66BwAAAAAAUugPf/hDfOITn8h6vzkLPHr37h0RWwdeUVGRq80AAAAAAAApsHbt2hg8eHAmP8i2nAUe2y5jVVFRIfAAAAAAAAAiInJ2Gww3LQcAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6OQ881td/mOtNQLf0x5Xvxnd/clP8ceW7+R4K3Uz9xvq4d/GPon5jfURsrdMv3febrNbrXPSZZltWrYq1N30vtqxale+hZE132KdsHadpOt7/8vXf0XaQbbk49j7e5/uNTTHn6bfi/camrPXPnq2zjil1GSC3ukud7S77EdG99oXOlfPAY8OajbneBHRLtXW18euip6K2rjbfQ6Gb+WBjfcxdcm988P9PGj78YEO8PPe38eEHG7K2jVz0mWZbVq+Oxu/Nji2rV+d7KFnTHfYpW8dpmo73v3z9d7QdZFsujr2P9/l+Y1Pc+cwygQdZ01nHlLoMkFvdpc52l/2I6F77QudySSsAAAAAACD1BB4AAAAAAEDqFeZ6A03rN8WGBpe1gvba9OHmiIhY/9H6aGhqyPNo6E7WbVq3w+VN67JXr5vWbcpKP91N85qG2FJXl+9hZEXzmu5Tlzp67KfxeF+3ad0u31t2Viegs7R2jLa3r7/UuGFzfLA+fa9dup7GDZs7dXvZfG0A8Gfd7fy3O7xfdLc5ofPkPPB49Ppno7SoNNebgW6noV99xOSIm5bPilie79GwJ/jlNU/mewjdXt3pU/I9BHZgTzz2r55/Zb6HALuU62P0oh++lNP+IVfUbwDawvsFezKXtAIAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEi9nN/DY+KVn45hhwzJ9Wag23l1yaKY996j8fVhl8eYAw/L93DoRpY3vL3D63me+M3PRr9hfbKyjbrlH+yR90VoTb+590XRQQfmexhZsfmNxd3mniQdPfbTeLxfd9T1Maxy+E7X76xOQGdp7Rhtjx0dz7ecOS5GDOydlf7Zs71V29ip94TJ5msDgD/rbue/3eH9orvNCZ0n54FHSVlxlFb2yvVmoNsp3qsoIiLKCsuisqQyz6OhOykvLt/h8pLy7NXrkvLirPTT3fSoqoye/frlexhZsaWq+9Sljh77aTzey4vLd/nesrM6AZ2ltWO0vX39pd6lRdGnLH2vXbqe3qVFnbq9bL42APiz7nb+2x3eL7rbnNB5XNIKAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6vWcOXPmzFx03NTUFDfeeGP8y7/8S/SucpMZaK/mLUk0Ll8fnxr1qajoXZHv4dDN9OpZGqP3PjRKC0sjIqKotDAGHTIgirN4481c9JlmBWVlUTJ+fPQo7z7vid1hn7J1nKbpeP/L139H20G25eLY+3ifpcU944jhfWOvksKs9c+erbOOKXUZILe6S53tLvsR0b32hT/blhtcccUVUVJSkvX+C5IkSbLea0SsXbs2Kisro6GhISoqfFkLAAAAAAB7slznBi5pBQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8uoH6jfVx7+IfRf3G+i7RT67taJwdHfv7jU0x5+m34v3Gpqy0y5Zcba+z94PcM6fp1hXmryuMAei61AjypS3n+bvzWWDLqlWx9qbvxZZVq3ZrXB19PLBVe95fvBd1nnw91+Y4+zyndEV1OT4eBR7dwAcb62Puknvjgw4GFdnqJ9d2NM6Ojv39xqa485llbQo82tIuW3K1vc7eD3LPnKZbV5i/rjAGoOtSI8iXtpzn785ngS2rV0fj92bHltWrd2tcHX08sFV73l+8F3WefD3X5jj7PKd0RXXrBB4AAAAAAAC7JPAAAAAAAABST+ABAAAAAACkXmG+B0D2rNu0LhqaGjr0+DT5+P5ma+yNGzbHB+s37XJ9PrQ2rt3pj+4p28cKnaMrvSYdQ8COdKU6xZ5pV591OvJZoHlNQ2ypq9utxwHZ05ZzUO9Fna+zPxuY49zxOY+upHHDRzntX+DRjVw9/8p8D6FT5WJ/L/rhS1nvMxu66rjoehwrdJRjCICuKFefdepOn5KTfoH2cQ7aNZmX7sNc0pV81LQ+p/27pBUAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDquYdHN3LdUdfHsMrhu/345Q1vp+o+IB/f32yN/ZYzx8WIgb13uv6t2sa8XPewtXG1V772g9zL9rFC5+hKr0nHELAjXalOsWfa1WedjnwW6Df3vig66MB2P27zG4vd/wOyqC3no
N6LOl9nfzYwx7njcx5dyaKl78Xfzspd/wKPbqS8uDwqSyo79Pg0+fj+ZmvsvUuLok9Z8S7X50Nr49qd/uiesn2s0Dm60mvSMQTsSFeqU+yZdvVZpyOfBXpUVUbPfv3a/bgtVbv/uQvYXlvOQb0Xdb7O/mxgjnPH5zy6kt6luY0kXNIKAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9XrOnDlzZi46bmpqihtvvDGuuOKKKCkpycUm+JhePUtj9N6HRmlhaZfoJ9d2NM6Ojr20uGccMbxv7FWy6+vItbVdtuRqe529H+SeOU23rjB/XWEMQNelRpAvbTnP353PAgVlZVEyfnz0KN+9e4B09PHAVu15f/Fe1Hny9Vyb4+zznNLVNDU1xc3f+07OcoOCJEmSrPcaEWvXro3KyspoaGiIioqKXGwCAAAAAABIiVznBi5pBQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN47CHeb2yKOU+/Fe83NnWoDQDs6eo31se9i38U9Rvrd7kMAAAA6FwCjz3E+41Nceczy1oNPFprAwB7ug821sfcJffGBx8LN3a0DAAAAOhcAg8AAAAAACD1BB4AAAAAAEDqFeZ7AHSuxg2b44P1m3a6DgBom3Wb1kVDU0Pm3wAAAEB+CTz2MBf98KV8DwEAuoWr51+Z7yEAAAAAH+OSVgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKnnHh57mFvOHBcjBvbe4bq3ahvd4wMA2ui6o66PYZXDIyJiecPb7ukBAAAAeSbw2MP0Li2KPmXFO10HALRNeXF5VJZUZv4NAAAA5JdLWgEAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gcceon/vkjjn2P2if++SDrUBgD1dn1594/QDvhR9evXd5TIAAACgcxUkSZLkouO1a9dGZWVlNDQ0REVFRS42AQAAAAAApESucwN/4QEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1CnPVcZIkERGxdu3aXG0CAAAAAABIiW15wbb8INtyFnjU1dVFRMTgwYNztQkAAAAAACBl6urqorKyMuv95izw6Nu3b0REvPPOOzkZOJAOa9eujcGDB8cf/vCHqKioyPdwgDxQBwB1AIhQCwB1AIhoaGiIIUOGZPKDbMtZ4NGjx9bbg1RWVipgQFRUVKgFsIdTBwB1AIhQCwB1APhzfpD1fnPSKwAAAAAAQCcSeAAAAAAAAKnXc+bMmTNz1nnPnnHsscdGYWHOrpwFpIBaAKgDgDoARKgFgDoA5LYOFCRJkmS9VwAAAAAAgE7kklYAAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABSr92Bx7vvvhtnnHFG9OvXL0pLS2P06NHx0ksvZdYnSRLXXHNN1NTURGlpaRx33HGxdOnSFn3U19fH1KlTo6KiIqqqquKcc86JdevWdXxvgE7RWh144IEHYsKECdGvX78oKCiIRYsWbdfHxo0bY8aMGdGvX78oLy+Pv/u7v4tVq1Z15m4AHbSrWrB58+a4/PLLY/To0VFWVhaDBg2KM888M957770WfTgngHRr7Zxg5syZMWrUqCgrK4s+ffrEcccdFwsXLmzRhzoA6dZaHfi4888/PwoKCuL73/9+i+XqAKRfa7Xg7LPPjoKCghY/kyZNatGHWgDp1pZzgsWLF8fkyZOjsrIyysrK4pOf/GS88847mfXZ+L6wXYHHBx98EEcffXQUFRXFI488Em+88UbcdNNN0adPn0ybb3/72/Fv//Zvcfvtt8fChQujrKwsJk6cGBs3bsy0mTp1avzud7+Lxx9/PB566KH49a9/Heedd167Bg7kR1vqwPr16+OYY46JWbNm7bSfr33ta/GLX/wifvKTn8Szzz4b7733XnzhC1/ojF0AsqC1WvDhhx/GK6+8EldffXW88sor8cADD8SSJUti8uTJLfpxTgDp1ZZzgpEjR8att94av/3tb2PevHkxbNiwmDBhQvzpT3/KtFEHIL3aUge2+dnPfhb/93//F4MGDdpunToA6dbWWjBp0qRYuXJl5ue+++5rsV4tgPRqSx1YtmxZHHPMMTFq1Kh45pln4rXXXourr746evXqlWmTle8Lk3a4/PLLk2OOOWan65ubm5OBAwcm3/nOdzLL1qxZk5SUlCT33XdfkiRJ8sYbbyQRkbz44ouZNo888khSUFCQvPvuu+0ZDpAHrdWBj3v77beTiEheffXVFsvXrFmTFBUVJT/5yU8yyxYvXpxERLJgwYKsjhfIjfbUgm1eeOGFJCKSFStWJEninADSbnfqQENDQxIRyRNPPJEkiToAadfWOvDHP/4x2WeffZLXX389GTp0aDJ79uzMOnUA0q8tteCss85KTj755J2uVwsg3dpSB774xS8mZ5xxxk7XZ+v7wnb9hceDDz4Y48aNi1NPPTWqq6tj7NixMWfOnMz6t99+O2pra+O4447LLKusrIwjjzwyFixYEBERCxYsiKqqqhg3blymzXHHHRc9evTY7s/bga6ntTrQFi+//HJs3ry5Ra0YNWpUDBkyJFMrgK5td2pBQ0NDFBQURFVVVUQ4J4C0a28d2LRpU9xxxx1RWVkZhx12WESoA5B2bakDzc3NMW3atLjsssvi4IMP3q4PdQDSr63nBM8880xUV1fHAQccEBdccEHU1dVl1qkFkG6t1YHm5ub45S9/GSNHjoyJEydGdXV1HHnkkfHzn/880yZb3xe2K/D4/e9/H7fddlvsv//+8eijj8YFF1wQF198cdx9990REVFbWxsREQMGDGjxuAEDBmTW1dbWRnV1dYv1hYWF0bdv30wboOtqrQ60RW1tbRQXF2e+9Nzm47UC6NraWws2btwYl19+eUyZMiUqKioiwjkBpF1b68BDDz0U5eXl0atXr5g9e3Y8/vjj0b9//4hQByDt2lIHZs2aFYWFhXHxxRfvsA91ANKvLbVg0qRJ8cMf/jCefPLJmDVrVjz77LNx
wgknxJYtWyJCLYC0a60OrF69OtatWxc33nhjTJo0KR577LE45ZRT4gtf+EI8++yzEZG97wsL2zPw5ubmGDduXNxwww0RETF27Nh4/fXX4/bbb4+zzjqrPV0BKaUOABHtqwWbN2+O0047LZIkidtuuy0fwwVyoK114DOf+UwsWrQo3n///ZgzZ06cdtppsXDhwu2+1ADSp7U68PLLL8fNN98cr7zyShQUFOR5tECutOWc4PTTT8+0Hz16dBx66KGx3377xTPPPBOf/exn8zJuIHtaqwPNzc0REXHyySfH1772tYiIGDNmTMyfPz9uv/32+PSnP521sbTrLzxqamrioIMOarHswAMPzNxJfeDAgRER2905fdWqVZl1AwcOjNWrV7dY/9FHH0V9fX2mDdB1tVYH2mLgwIGxadOmWLNmTYvlH68VQNfW1lqwLexYsWJFPP7445vo3U4AAAbjSURBVJm/7ohwTgBp19Y6UFZWFiNGjIhPfepTceedd0ZhYWHceeedEaEOQNq1Vgeee+65WL16dQwZMiQKCwujsLAwVqxYEV//+tdj2LBhEaEOQHewO98T7LvvvtG/f/946623IkItgLRrrQ70798/CgsLW80WsvF9YbsCj6OPPjqWLFnSYtmbb74ZQ4cOjYiI4cOHx8CBA+PJJ5/MrF+7dm0sXLgwxo8fHxER48ePjzVr1sTLL7+cafPUU09Fc3NzHHnkke0ZDpAHrdWBtjjiiCOiqKioRa1YsmRJvPPOO5laAXRtbakF28KOpUuXxhNPPBH9+vVr0d45AaTb7p4TNDc3R1NTU0SoA5B2rdWBadOmxWuvvRaLFi3K/AwaNCguu+yyePTRRyNCHYDuYHfOCf74xz9GXV1d1NTURIRaAGnXWh0oLi6OT37yk7tsk7XvC9t8e/MkSV544YWksLAwuf7665OlS5cmP/rRj5K99tor+e///u9MmxtvvDGpqqpK/vd//zd57bXXkpNPPjkZPnx4smHDhkybSZMmJWPHjk0WLlyYzJs3L9l///2TKVOmtGcoQJ60pQ7U1dUlr776avLLX/4yiYhk7ty5yauvvpqsXLky0+b8889PhgwZkjz11FPJSy+9lIwfPz4ZP358PnYJ2A2t1YJNmzYlkydPTj7xiU8kixYtSlauXJn5aWpqyvTjnADSq7U6sG7duuSKK65IFixYkCxfvjx56aWXkunTpyclJSXJ66+/nulHHYD0astng780dOjQZPbs2S2WqQOQbq3VgsbGxuTSSy9NFixYkLz99tvJE088kRx++OHJ/vvvn2zcuDHTj1oA6dWWc4IHHnggKSoqSu64445k6dKlyS233JL07Nkzee655zJtsvF9YbsCjyRJkl/84hfJIYcckpSUlCSjRo1K7rjjjhbrm5ubk6uvvjoZMGBAUlJSknz2s59NlixZ0qJNXV1dMmXKlKS8vDypqKhIpk+fnjQ2NrZ3KECetFYHfvCDHyQRsd3Ptddem2mzYcOG5B//8R+TPn36JHvttVdyyimntAhEgK5vV7Xg7bff3mEdiIjk6aefzrRzTgDptqs6sGHDhuSUU05JBg0alBQXFyc1NTXJ5MmTkxdeeKFFH+oApFtrnw3+0o4CD3UA0m9XteDDDz9MJkyYkOy9995JUVFRMnTo0OTcc89NamtrW/ShFkC6teWc4M4770xGjBiR9OrVKznssMOSn//85y3WZ+P7woIkSZJ2/HUKAAAAAABAl9Oue3gAAAAAAAB0RQIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAA6LCzzz47Pv/5z+d7GAAAwB6sMN8DAAAAuraCgoJdrr/22mvj5ptvjiRJOmlEAAAA2xN4AAAAu7Ry5crMv//nf/4nrrnmmliyZElmWXl5eZSXl+djaAAAABkuaQUAAOzSwIEDMz+VlZVRUFDQYll5efl2l7Q69thj46KLLopLLrkk+vTpEwMGDIg5c+bE+vXrY/r06dG7d+8YMWJEPPLIIy229frrr8cJJ5wQ5eXlMWDAgJg2bVq8//77nb3LAABACgk8AACAnLj77rujf//+8cILL8RFF10UF1xwQZx66qlx1FFHxSuvvBITJkyIadOmxYcffhgREWvWrIm//du/jbFjx8ZLL70Uv/rVr2LVqlVx2mmn5XlPAACANBB4AAAAOXHYYYfFVVddFfvvv39cccUV0atXr+jfv3+ce+65sf/++8c111wTdXV18dprr0VExK233hpjx46NG264IUaNGhVjx46Nu+66K55++ul4880387w3AABAV+ceHgAAQE4ceuihmX/37Nkz+vXrF6NHj84sGzBgQERErF69OiIifvOb38TTTz+9w/uBLFu2LEaOHJnjEQMAAGkm8AAAAHKiqKioxe8FBQUtlhUUFERERHNzc0RErFu3Lk466aSYNWvWdn3V1NTkcKQAAEB3IPAAAAC6hMMPPzzuv//+GDZsWBQW+qgCAAC0j3t4AAAAXcKMGTOivr4+pkyZEi+++GIsW7YsHn300Zg+fXps2bIl38MDAAC6OIEHAADQJQwaNCief/752LJlS0yYMCFGjx4dl1xySVRVVUWPHj66AAAAu1aQJEmS70EAAAAAAAB0hP8mBQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1/h//ffiQt3ubxgAAAABJRU5ErkJggg==", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAl9UlEQVR4nO3de3RV5Z0//k8gEEJIjpAYAnJzitzECpWZqnTVajtKRwutq/WCIkjrZU3VTlupdrqsOi67rFOko1ZbrTq9UGnroF+trbd6RSq2CK1WRlFBRblowBAEApL9+4OfZ4xccjsnJzu8XmtlLbL3s5/97PPs8znn8M7ZuyhJkiQAAAAAAABSrFuhBwAAAAAAANBeAg8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2Bx27MmDEjioqKdvl56aWX9rhu0qRJ2e2HDRu22zZXXXVVts1rr70Wn/vc56KsrCyqqqriggsuiG3btmXXb926NWbMmBGHHHJIFBcXx+c///ldxrlgwYKYOHFiVFZWRmlpaYwaNSrmzJmT18cm7dIytxERDQ0N8Z3vfCeGDh0aJSUl8ZGPfCRuvfXWvD02aff+/J177rm7rPvXf/3XKCoqihkzZjRp21nm+n1PPvlkFBcXx7hx49r1WHR1uZ7riIiFCxfGv/zLv0Tfvn2jV69eccghh8Ts2bNjx44dTdpt2LAhpk2bFplMJjKZTEybNi3eeeedJm2+9rWvxWGHHRYlJSW7ncuVK1fudkz33Xdfux4XAAAAgH1dcSF2uuHdbc03yqG+ZT1bvc2kSZPitttua7Js//333+O6kpKSJr//x3/8R5x11llNlpWXl0dExI4dO+L444+P/fffPxYsWBC1tbUxffr0SJIkrrvuumyb0tLSuOCCC+J//ud/djvGsrKyOO+88+KjH/1olJWVxYIFC+Kcc86JsrKyOPvss1t9zLlQ11DXYfvKlGTatF0a5jYi4qSTToq1a9fGLbfcEsOHD49169bFe++916Zjbq8tdVs7dH+lmV5t2m7w4MExb968mDNnTpSWlkbEztDh9ttvjyFDhjRp25nmOiKirq4uzjjjjPj0pz8da9eubf3B58iO2toO3V/3yso2bZfLub7zzjvjpJNOijPPPDMeeeSR2G+//eKhhx6Kb33rW/HUU0/Fb37zmygqKoqIiKlTp8aqVauy4cTZZ58d06ZNi3vuuSfbX5IkMXPmzFi0aFH87W9/2+MxPPTQQ3HwwQdnf+/Xr1+bHgsAAAAAdipI4PHZqx/p0P09dflxrd6mpKQkampqWr3ufeXl5Xts88ADD8Tzzz8fr7/+egwcODAiImbPnh0zZsyIK6+8MioqKqKsrCxuvPHGiNj5V98f/gviiIjx48fH+PHjs78PGzYs5s+fH0888UTBAo9pf5jaYfu6+/P3tmm7NMztfffdF4899li88sor2f8EHTZsWAuPMPd+fsYdHbq/c/7f6W3a7mMf+1i88sorMX/+/DjttNMiImL+/PkxePDg+Id/+IcmbTvLXL/vnHPOialTp0b37t3jrrvuauER596aj47r0P0d8MbrbdouV3P97rvvxllnnRWTJ0+Om266Kbv8K1/5SvTv3z8mT54cv/nNb+Lkk0+OZcuWxX333RdPPfVUfPzjH4+IiJtvvjmOOOKIeOGFF2LkyJEREXHttddGRMRbb72118CjsrKy2XMQAAAAgJZzSasC+NOf/hRjx47N/idpRMRxxx0XDQ0NsXjx4jb3u2TJkli4cGEcddRRuRgmbZCrub377rtjwoQJcfXVV8cBBxwQI0aMiAsvvDC2bNmSj2F3KWeeeWaTv+a/9dZbY+bMmTnfTy6fx7fddlu8/PLLcemll+Z6mF1aLub6gQceiNra2rjwwgt3Wfe5z30uRowYEbfffntE7JzzTCaTDTsiIg4//PDIZDKxcOHCVo9/8uTJUV1dHRMnTow77ujYUBEAAACgKxJ47MHvfve76NOnT/bnS1/60h7X9enTJ6644oom21900UW7tHn00UcjImLNmjXRv3//Ju379u0bPXv2jDVr1rR6rIMGDYqSkpKYMGFCfPWrX42vfOUrrT/gfUga5vaVV16JBQsWxHPPPRd33nln/PCHP4w77rgjvvrVr7b9wPcR06ZNiwULFsTKlSvj1VdfjSeffDJOP33Xb4x0lrlevnx5XHzxxTF37twoLi7Il+5SKxdz/eKLL0ZExOjRo3e7j1GjRmXbrFmzJqqrq3dpU11d3ao579OnT1xzzTVxxx13xO9///v49Kc/HSeffHL88pe/bHEfAAAAAOzK/67twdFHH529FE3Ezvtl7GldxK7XXp81a1b2prnvO+CAA7L/fv968B+UJMlulzfniSeeiE2bNsVTTz0VF198cQwfPjxOPfXUVvezr0jD3DY2NkZRUVHMnTs3Mpmd9yq55ppr4otf/GL86Ec/yt6zgF1VVVXF8ccfHz/72c8iSZI4/vjjo6qqapd2nWGud+zYEVOnTo3LL788RowY0aJt+D+5nOskSXa7jw/PZy6e31VVVfH1r389+/uECRNiw4YNcfXVV+82sAEAAACgZQoSePzhW0cXYretUlZWFsOHD2/1uvdVVVXtsU1NTU0sWrSoybINGzbE9u3bd/mL8ZY48MADIyLikEMOibVr18Zll11WsMDjF5/9VUH22xppmNsBAwbEAQcckA07Inb+BXqSJLFq1ao46KCDWtxXLpzx8y926P7aa+bMmXHeeedFRMSPfvSj3bbpDHNdX18ff/nLX2LJkiXZ8TY2NkaSJFFcXBwPPPBAHHPMMS3qK1dq/ra0Q/fXXu2d6/eDpmXLlsWRRx65y/r//d//jTFjxkTEzjnf3Q3l33rrrTbV7g86/PDD46c//Wm7+gAAAADY1xUk8Ohb1rMQu+00jjjiiLjyyitj9erVMWDAgIjYeR35kpKSOOyww9rVd5Ik0dDQkIthtkmmJNN8oy4sV3M7ceLE+O1vfxubNm2KPn36RMTOS+9069YtBg0alJex701ppleH77M9Jk2aFNu2bYuInffVyIdczHVFRUU8++yzTZbdcMMN8fDDD8cdd9yRDTM7UvfKyg7fZ3u0d66PPfbY6NevX8yePXuXwOPuu++O5cuXZy9/dcQRR0RdXV08/fTT8U//9E8REbFo0aKoq6vbbVjSGkuWLMmeRwAAAAC0jUtatUFDQ8Mu12svLi5ucimV+vr6Xdr07t07Kioq4thjj40xY8bEtGnT4j//8z9j/fr1ceGFF8ZZZ50VFRUV2fbPP/98bNu2LdavXx/19fWxdOnSiIgYN25cROz8a+YhQ4bEqFGjIiJiwYIF8YMf/CDOP//8PBz1vqGzzO3UqVPjiiuuiDPPPDMuv/zyePvtt2PWrFkxc+
ZMl7Nqge7du8eyZcuy/96dzjDX3bp1i7Fjxzbpv7q6Onr16rXLcnavvXNdVlYWP/nJT+KUU06Js88+O84777yoqKiIP/7xjzFr1qz44he/GCeddFJE7PyW1aRJk+Kss86Kn/zkJxERcfbZZ8cJJ5wQI0eOzPb90ksvxaZNm2LNmjWxZcuW7JyPGTMmevbsGT/72c+iR48eMX78+OjWrVvcc889ce2118b3v//9XD88AAAAAPuWhF1Mnz49mTJlyh7XRcQuPyNHjsy2GTp06G7bnHPOOdk2r776anL88ccnpaWlSb9+/ZLzzjsv2bp1a5N97amf91177bXJwQcfnPTu3TupqKhIxo8fn9xwww3Jjh07cvuAdCFpmdskSZJly5Yln/nMZ5LS0tJk0KBByTe+8Y1k8+bNuXswupi9zW2SJMmUKVOS6dOnZ9t2prn+oEsvvTQ59NBD2/QY7CtyPddJkiSPP/54MmnSpCSTySQ9e/ZMxowZk/zgBz9I3nvvvSbtamtrk9NOOy0pLy9PysvLk9NOOy3ZsGFDkzZHHXXUbve5YsWKJEmS5L//+7+T0aNHJ717907Ky8uTww47LPnFL37R3ocFAAAAYJ9XlCR7uFMrAAAAAABASnQr9AAAAAAAAADaS+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6hXnq+PGxsZ48803o7y8PIqKivK1GwAAAAAAIAWSJIn6+voYOHBgdOuW++9j5C3wePPNN2Pw4MH56h4AAAAAAEih119/PQYNGpTzfvMWeJSXl0fEzoFXVFTkazcAAAAAAEAKbNy4MQYPHpzND3Itb4HH+5exqqioEHgAAAAAAAAREXm7DYablgMAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEi9vAce767fnO9dQJe0avUb8YPfzo5Vq98o9FDoYtZvXR+/WjY31m9dHxE76/Rfbv9rTut1PvpMsx1r18bG2dfEjrVrCz2UnOkKx5Sr8zRN5/uHn//tbQe5lo9z74N9vl3fEDc/8lK8Xd+Qs/7Zt3XkOaU2A+RPV6mxXeU4IrrWsdCx8h54bHlna753AV3Smto18XiPh2NN7ZpCD4UuZsPW9THvhV/Fhv//TcPmDVti8bxnY/OGLTnbRz76TLMd69ZF/TVzYse6dYUeSs50hWPK1XmapvP9w8//9raDXMvHuffBPt+ub4hbHn1Z4EHOdOQ5pTYD5E9XqbFd5Tgiutax0LFc0goAAAAAAEg9gQcAAAAAAJB6xfneQcO722JLnctaQWtt27w9IiLefe/dqGuoK/Bo6Eo2bdu02+UNm3JXrxs2bctJP11N4zt1saO2ttDDyInGd7pOXWrvuZ/G833Ttk17fW3ZU52AjtLcOdravj6sfsv22PBu+p67dD71W7Z3+D5z+fwAYKeu9v63K7xWdLU5oePkPfC4/8rHorRHab53A11OXeX6iMkRs1d+P2JloUfDvuDe7/6x0EPo8mpPObXQQ2A39sVz/5KF3yn0EGCv8n2Onv/zv+S1f8gnNRyA5nitYF/mklYAAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpl/d7eBz3naNi2Ngh+d4NdDlLXlgaC968P7457KIYN/rQQg+HLmRl3YrdXs/z+P/4dFQO65uTfdSu3LBP3hehOZXzbo8eY0YXehg5sf35ZV3mniTtPffTeL5fceSVMSxz4B7X76lOQEdp7hxtjd2dz9edMSGG15TnpH/2bS+tqe/we8Lk8vkBwE5d7f1vV3it6GpzQsfJe+BRUtYzSjO98r0b6HJ69u4RERFlxWWRKckUeDR0JX169tnt8pI+uavXJX165qSfrqbbfpnoXllZ6GHkxI79uk5dau+5n8bzvU/PPnt9bdlTnYCO0tw52tq+Pqy8tEf0LUvfc5fOp7y0R4fvM5fPDwB26mrvf7vCa0VXmxM6jktaAQAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASL28Bx6l+7lhObRFTWVNfHL7MVFTWVPoodDF9O3VL04ZOTX69uoXERG9+5bGYaccEr37luZsH/noM826V1dH+Te+Ht2rqws9lJzpCseUq/M0Tef7h5//7W0HuZaPc++DfVaVl8SXP/WRqCovyVn/7Ns68pxSmwHyp6vU2K5yHBFd61joWEVJkiT56Hjjxo2RyWSirq4uKioq8rELAAAAAAAgJfKdG7ikFQAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfw6ALWb10fv1o2N9ZvXd8p+sm33Y2zvWN/u74hbn7kpXi7viEn7XIlX/vr6OMg/8xpunWW+ess4wA6H/WBQmnJ+/y2fBbYsXZtbJx9TexYu7ZN42rv9sBOrXl98VrUsQrxeJvj3POY0hnV5vl8FHh0ARu2ro95L/wqNrQzqMhVP/m2u3G2d+xv1zfELY++3KLAoyXtciVf++vo4yD/zGm6dZb56yzjADof9YFCacn7/LZ8Ftixbl3UXzMndqxb16ZxtXd7YKfWvL54LepYhXi8zXHueUzpjGo3CTwAAAAAAAD2SuABAAAAAACknsADAAAAAABIveJCD4Dc2bRtU9Q11LVr+zT54PHmauz1W7bHhne37XV9ITQ3rrb0R9eU63OFjtHZnpPOI+DDOludYt+zt8867fks0PhOXeyorW3TdkDutOT9p9eiwujIzwbmOH98xqMzqd/yXl77F3h0IZcs/E6hh9Ch8nG85//8LznvMxc667jofJwr5ILzCIDOJl+fdWpPOTUv/QKt4/1n52VuugbzSGfyXsO7ee3fJa0AAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABSzz08upArjrwyhmUObPP2K+tWpOo+IB883lyN/bozJsTwmvI9rn9pTX1BrnvY3Lhaq1DHQf7l+lyhY3S256TzCPiwzlan2Pfs7bNOez4LVM67PXqMGd3q7bY/v8z9PyCHWvL+02tRYXTkZwNznD8+49GZLF3+Zhzz/fz1L
/DoQvr07BOZkky7tk+TDx5vrsZeXtoj+pb13Ov6QmhuXG3pj64p1+cKHaOzPSedR8CHdbY6xb5nb5912vNZoNt+meheWdnq7Xbs1/bPXcCuWvL+02tRYXTkZwNznD8+49GZlJfmN5JwSSsAAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3h0AX179YtTRk6Nvr36dYp+8m1342zv2KvKS+LLn/pIVJWX5KRdruRrfx19HOSfOU23zjJ/nWUcQOejPlAoLXmf35bPAt2rq6P8G1+P7tXVbRpXe7cHdmrN64vXoo5ViMfbHOeex5TOqLJPfs/HoiRJknx0vHHjxshkMlFXVxcVFRX52AUAAAAAAJAS+c4NfMMDAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/DYh7xd3xA3P/JSvF3f0Kb1AEDE+q3r41fL5sb6retbtBwAAADoGAKPfcjb9Q1xy6Mv7zXw2Nt6ACBiw9b1Me+FX8WGDwUbe1oOAAAAdAyBBwAAAAAAkHoCDwAAAAAAIPWKCz0AOl79lu2x4d1tu10OALTMpm2boq6hrsnvAAAAQOEIPPZB5//8L4UeAgCk3iULv1PoIQAAAAAf4JJWAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqeceHvug686YEMNryndZ/tKaevf3AIAWuuLIK2NY5sDs7yvrVrivBwAAABSQwGMfVF7aI/qW9dztcgCgZfr07BOZkkyT3wEAAIDCcUkrAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfw2IdUlZfElz/1kagqL2nTegAgom+vfnHKyKnRt1e/Fi0HAAAAOkZRkiRJPjreuHFjZDKZqKuri4qKinzsAgAAAAAASIl85wa+4QEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1ivPVcZIkERGxcePGfO0CAAAAAABIiffzgvfzg1zLW+BRW1sbERGDBw/O1y4AAAAAAICUqa2tjUwmk/N+8xZ49OvXLyIiXnvttbwMHEiHjRs3xuDBg+P111+PioqKQg8HKAB1AFAHgAi1AFAHgIi6uroYMmRINj/ItbwFHt267bw9SCaTUcCAqKioUAtgH6cOAOoAEKEWAOoA8H/5Qc77zUuvAAAAAAAAHUjgAQAAAAAApF7eAo+SkpK49NJLo6SkJF+7AFJALQDUAUAdACLUAkAdAPJfB4qSJEny0jMAAAAAAEAHcUkrAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqdfqwOONN96I008/PSorK6N3794xbty4WLx4cXZ9kiRx2WWXxcCBA6O0tDQ+9alPxd///vcmfTQ0NMT5558fVVVVUVZWFpMnT45Vq1a1/2iADtFcHZg/f34cd9xxUVVVFUVFRbF06dJd+lAHIP32Vgu2b98eF110URxyyCFRVlYWAwcOjDPOOCPefPPNJn2oBZBuzb0nuOyyy2LUqFFRVlYWffv2jc985jOxaNGiJn2oA5BuzdWBDzrnnHOiqKgofvjDHzZZrg5A+jVXC2bMmBFFRUVNfg4//PAmfagFkG4teU+wbNmymDx5cmQymSgvL4/DDz88Xnvttez6XNSBVgUeGzZsiIkTJ0aPHj3iD3/4Qzz//PMxe/bs2G+//bJtrr766rjmmmvi+uuvjz//+c9RU1MT//zP/xz19fXZNv/2b/8Wd955Z8ybNy8WLFgQmzZtihNOOCF27NjRqsEDHa8ldeDdd9+NiRMnxlVXXbXHftQBSLfmasHmzZvjmWeeiUsuuSSeeeaZmD9/frz44osxefLkJv2oBZBeLXlPMGLEiLj++uvj2WefjQULFsSwYcPi2GOPjbfeeivbRh2A9GpJHXjfXXfdFYsWLYqBAwfusk4dgHRraS2YNGlSrF69Ovvz+9//vsl6tQDSqyV14OWXX45PfOITMWrUqHj00Ufjr3/9a1xyySXRq1evbJuc1IGkFS666KLkE5/4xB7XNzY2JjU1NclVV12VXbZ169Ykk8kkP/7xj5MkSZJ33nkn6dGjRzJv3rxsmzfeeCPp1q1bct9997VmOEABNFcHPmjFihVJRCRLlixpslwdgPRrTS1439NPP51ERPLqq68mSaIWQNq1pQ7U1dUlEZE89NBDSZKoA5B2La0Dq1atSg444IDkueeeS4YOHZrMmTMnu04dgPRrSS2YPn16MmXKlD2uVwsg3VpSB04++eTk9NNP3+P6XNWBVn3D4+67744JEybEl770paiuro7x48fHzTffnF2/YsWKWLNmTRx77LHZZSUlJXHUUUfFwoULIyJi8eLFsX379iZtBg4cGGPHjs22ATqv5upAS6gDkH5tqQV1dXVRVFSU/QsPtQDSrbV1YNu2bXHTTTdFJpOJQw89NCLUAUi7ltSBxsbGmDZtWsyaNSsOPvjgXfpQByD9Wvqe4NFHH43q6uoYMWJEnHXWWbFu3brsOrUA0q25OtDY2Bj33ntvjBgxIo477riorq6Oj3/843HXXXdl2+SqDrQq8HjllVfixhtvjIMOOijuv//+OPfcc+OCCy6In//85xERsWbNmoiI6N+/f5Pt+vfvn123Zs2a6NmzZ/Tt23ePbYDOq7k60BLqAKRfa2vB1q1b4+KLL46pU6dGRUVFRKgFkHYtrQO/+93vok+fPtGrV6+YM2dOPPjgg1FVVRUR6gCkXUvqwPe///0oLi6OCy64YLd9qAOQfi2pBZ/97Gdj7ty58fDDD8fs2bPjz3/+cxxzzDHR0NAQEWoBpF1zdWDdunWxadOmuOqqq2LSpEnxwAMPxBe+8IU48cQT47HHHouI3NWB4tYMvLGxMSZMmBDf+973IiJi/Pjx8fe//z1uvPHGOOOMM7LtioqKmmyXJMkuyz6sJW2AwmtpHWgLdQDSozW1YPv27XHKKadEY2Nj3HDDDc32rRZAOrS0Dhx99NGxdOnSePvtt+Pmm2+Ok046KRYtWhTV1dV77FsdgHRorg4sXrw4/uu/
/iueeeaZVj+n1QFIj5a8Jzj55JOz7ceOHRsTJkyIoUOHxr333hsnnnjiHvtWCyAdmqsDjY2NERExZcqU+PrXvx4REePGjYuFCxfGj3/84zjqqKP22Hdr60CrvuExYMCAGDNmTJNlo0ePzt5JvaamJiJil8Rl3bp12W991NTUxLZt22LDhg17bAN0Xs3VgZZQByD9WloLtm/fHieddFKsWLEiHnzwwey3OyLUAki7ltaBsrKyGD58eBx++OFxyy23RHFxcdxyyy0RoQ5A2jVXB5544olYt25dDBkyJIqLi6O4uDheffXV+OY3vxnDhg2LCHUAuoK2/D/BgAEDYujQobF8+fKIUAsg7ZqrA1VVVVFcXNxstpCLOtCqwGPixInxwgsvNFn24osvxtChQyMi4sADD4yampp48MEHs+u3bdsWjz32WBx55JEREXHYYYdFjx49mrRZvXp1PPfcc9k2QOfVXB1oCXUA0q8lteD9sGP58uXx0EMPRWVlZZP2agGkW1vfEyRJkr18hToA6dZcHZg2bVr87W9/i6VLl2Z/Bg4cGLNmzYr7778/ItQB6Ara8p6gtrY2Xn/99RgwYEBEqAWQds3VgZ49e8Y//uM/7rVNzupAi29vniTJ008/nRQXFydXXnllsnz58mTu3LlJ7969k1/+8pfZNldddVWSyWSS+fPnJ88++2xy6qmnJgMGDEg2btyYbXPuuecmgwYNSh566KHkmWeeSY455pjk0EMPTd57773WDAcogJbUgdra2mTJkiXJvffem0REMm/evGTJkiXJ6tWrs23UAUi35mrB9u3bk8mTJyeDBg1Kli5dmqxevTr709DQkO1HLYD0aq4ObNq0Kfn2t7+d/OlPf0pWrlyZLF68OPnyl7+clJSUJM8991y2H3UA0qslnw0+bOjQocmcOXOaLFMHIN2aqwX19fXJN7/5zWThwoXJihUrkkceeSQ54ogjkgMOOMD/F0IX0ZL3BPPnz0969OiR3HTTTcny5cuT6667LunevXvyxBNPZNvkog60KvBIkiS55557krFjxyYlJSXJqFGjkptuuqnJ+sbGxuTSSy9NampqkpKSkuSTn/xk8uyzzzZps2XLluS8885L+vXrl5SWliYnnHBC8tprr7V2KECBNFcHbrvttiQidvm59NJLs23UAUi/vdWCFStW7LYORETyyCOPZNupBZBue6sDW7ZsSb7whS8kAwcOTHr27JkMGDAgmTx5cvL000836UMdgHRr7rPBh+0u8FAHIP32Vgs2b96cHHvsscn++++f9OjRIxkyZEgyffr0XZ7nagGkW0veE9xyyy3J8OHDk169eiWHHnpoctdddzVZn4s6UJQkSdKKb6cAAAAAAAB0Oq26hwcAAAAAAEBnJPAAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AACAdrvsssti3LhxhR4GAACwDytKkiQp9CAAAIDOq6ioaK/rp0+fHtdff300NDREZWVlB40KAACgKYEHAACwV2vWrMn++9e//nV897vfjRdeeCG7rLS0NDKZTCGGBgAAkOWSVgAAwF7V1NRkfzKZTBQVFe2y7MOXtJoxY0Z8/vOfj+9973vRv3//2G+//eLyyy+P9957L2bNmhX9+vWLQYMGxa233tpkX2+88UacfPLJ0bdv36isrIwpU6bEypUrO/aAAQCAVBJ4AAAAefHwww/Hm2++GY8//nhcc801cdlll8UJJ5wQffv2jUWLFsW5554b5557brz++usREbF58+Y4+uijo0+fPvH444/HggULok+fPjFp0qTYtm1bgY8GAADo7AQeAABAXvTr1y+uvfbaGDlyZMycOTNGjhwZmzdvjn//93+Pgw46KL797W9Hz54948knn4yIiHnz5kW3bt3ipz/9aRxyyCExevTouO222+K1116LRx99tLAHAwAAdHrFhR4AAADQNR188MHRrdv//Y1V//79Y+zYsdnfu3fvHpWVlbFu3bqIiFi8eHG89NJLUV5e3qSfrVu3xssvv9wxgwYAAFJL4AEAAORFjx49mvxeVFS022WNjY0REdHY2BiHHXZYzJ07d5e+9t9///wNFAAA6BIEHgAAQKfwsY99LH79619HdXV1VFRUFHo4AABAyriHBwAA0CmcdtppUVVVFVOmTIknnngiVqxYEY899lh87Wtfi1WrVhV6eAAAQCcn8AAAADqF3r17x+OPPx5DhgyJE088MUaPHh0zZ86MLVu2+MYHAADQrKIkSZJCDwIAAAAAAKA9fMMDAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOr9fw+gShyFf/1LAAAAAElFTkSuQmCC\n", "text/plain": [ - "" + "" ] }, - "execution_count": 12, + "execution_count": 20, "metadata": {}, "output_type": "execute_result" } @@ -842,7 +687,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "MxlrTbyPYnqB" From 81a0b481d90905304deaae2921aebf24650cf98b Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Tue, 10 Oct 2023 15:46:12 +0200 Subject: [PATCH 06/23] Using GPU in intro.ipynb when available --- tutorials/intro.ipynb | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/tutorials/intro.ipynb b/tutorials/intro.ipynb index 93c7f623f..75344267a 100644 --- a/tutorials/intro.ipynb +++ b/tutorials/intro.ipynb @@ -242,24 +242,12 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": { "id": "xC05jFO_Ynp_", "outputId": "c5502632-56ae-4adb-8bdc-112deedc8893" }, - "outputs": [ - { - "ename": "ModuleNotFoundError", - "evalue": "No module named 'google.colab'", - "output_type": "error", - "traceback": [ - 
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m/tmp/ipykernel_26526/1787874441.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mimport\u001b[0m \u001b[0mgoogle\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcolab\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0mown_file\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgoogle\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcolab\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfiles\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mupload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpopitem\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mOWN_FILE\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m'audio'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mown_file\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mnotebook\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'google.colab'" - ] - } - ], + "outputs": [], "source": [ "import google.colab\n", "own_file, _ = google.colab.files.upload().popitem()\n", From 191535bd47ece004b0cc3623923311b4ef7d53c3 Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Wed, 15 Nov 2023 14:29:59 +0100 Subject: [PATCH 07/23] Copying the content of SpeakerDiarization task into StreamingSpeakerDiarization, adding latency as a parameter and using latency to shift the prediction and targets during training and validation steps --- .../streaming_speaker_diarization.py | 898 ++++++++++++++++++ 1 file changed, 898 insertions(+) create mode 100644 pyannote/audio/tasks/segmentation/streaming_speaker_diarization.py diff --git a/pyannote/audio/tasks/segmentation/streaming_speaker_diarization.py b/pyannote/audio/tasks/segmentation/streaming_speaker_diarization.py new file mode 100644 index 000000000..68769bf5d --- /dev/null +++ b/pyannote/audio/tasks/segmentation/streaming_speaker_diarization.py @@ -0,0 +1,898 @@ +# MIT License +# +# Copyright (c) 2020- CNRS +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +import sys +import math +import warnings +from collections import Counter +from typing import Dict, Literal, Sequence, Text, Tuple, Union + +import numpy as np +import torch +import torch.nn.functional +from matplotlib import pyplot as plt +from pyannote.core import Segment, SlidingWindowFeature +from pyannote.database.protocol import SpeakerDiarizationProtocol +from pyannote.database.protocol.protocol import Scope, Subset +from pytorch_lightning.loggers import MLFlowLogger, TensorBoardLogger +from rich.progress import track +from torch_audiomentations.core.transforms_interface import BaseWaveformTransform +from torchmetrics import Metric + +from pyannote.audio.core.task import Problem, Resolution, Specifications, Task +from pyannote.audio.tasks.segmentation.mixins import SegmentationTaskMixin +from pyannote.audio.torchmetrics import ( + DiarizationErrorRate, + FalseAlarmRate, + MissedDetectionRate, + OptimalDiarizationErrorRate, + OptimalDiarizationErrorRateThreshold, + OptimalFalseAlarmRate, + OptimalMissedDetectionRate, + OptimalSpeakerConfusionRate, + SpeakerConfusionRate, +) +from pyannote.audio.utils.loss import binary_cross_entropy, mse_loss, nll_loss +from pyannote.audio.utils.permutation import permutate +from pyannote.audio.utils.powerset import Powerset + +Subsets = list(Subset.__args__) +Scopes = list(Scope.__args__) + + +class StreamingSpeakerDiarization(SegmentationTaskMixin, Task): + """Speaker diarization + + Parameters + ---------- + protocol : SpeakerDiarizationProtocol + pyannote.database protocol + duration : float, optional + Chunks duration. Defaults to 2s. + max_speakers_per_chunk : int, optional + Maximum number of speakers per chunk (must be at least 2). + Defaults to estimating it from the training set. + max_speakers_per_frame : int, optional + Maximum number of (overlapping) speakers per frame. + Setting this value to 1 or more enables `powerset multi-class` training. + Default behavior is to use `multi-label` training. + weigh_by_cardinality: bool, optional + Weigh each powerset classes by the size of the corresponding speaker set. + In other words, {0, 1} powerset class weight is 2x bigger than that of {0} + or {1} powerset classes. Note that empty (non-speech) powerset class is + assigned the same weight as mono-speaker classes. Defaults to False (i.e. use + same weight for every class). Has no effect with `multi-label` training. + warm_up : float or (float, float), optional + Use that many seconds on the left- and rightmost parts of each chunk + to warm up the model. While the model does process those left- and right-most + parts, only the remaining central part of each chunk is used for computing the + loss during training, and for aggregating scores during inference. + Defaults to 0. (i.e. no warm-up). + balance: Sequence[Text], optional + When provided, training samples are sampled uniformly with respect to these keys. + For instance, setting `balance` to ["database","subset"] will make sure that each + database & subset combination will be equally represented in the training samples. + weight: str, optional + When provided, use this key as frame-wise weight in loss function. + batch_size : int, optional + Number of training samples per batch. Defaults to 32. 
+ num_workers : int, optional + Number of workers used for generating training samples. + Defaults to multiprocessing.cpu_count() // 2. + pin_memory : bool, optional + If True, data loaders will copy tensors into CUDA pinned + memory before returning them. See pytorch documentation + for more details. Defaults to False. + augmentation : BaseWaveformTransform, optional + torch_audiomentations waveform transform, used by dataloader + during training. + vad_loss : {"bce", "mse"}, optional + Add voice activity detection loss. + Cannot be used in conjunction with `max_speakers_per_frame`. + metric : optional + Validation metric(s). Can be anything supported by torchmetrics.MetricCollection. + Defaults to AUROC (area under the ROC curve). + + References + ---------- + Hervé Bredin and Antoine Laurent + "End-To-End Speaker Segmentation for Overlap-Aware Resegmentation." + Proc. Interspeech 2021 + + Zhihao Du, Shiliang Zhang, Siqi Zheng, and Zhijie Yan + "Speaker Embedding-aware Neural Diarization: an Efficient Framework for Overlapping + Speech Diarization in Meeting Scenarios" + https://arxiv.org/abs/2203.09767 + + """ + + def __init__( + self, + protocol: SpeakerDiarizationProtocol, + duration: float = 2.0, + max_speakers_per_chunk: int = None, + max_speakers_per_frame: int = None, + weigh_by_cardinality: bool = False, + warm_up: Union[float, Tuple[float, float]] = 0.0, + balance: Sequence[Text] = None, + weight: Text = None, + batch_size: int = 32, + num_workers: int = None, + pin_memory: bool = False, + augmentation: BaseWaveformTransform = None, + vad_loss: Literal["bce", "mse"] = None, + metric: Union[Metric, Sequence[Metric], Dict[str, Metric]] = None, + max_num_speakers: int = None, # deprecated in favor of `max_speakers_per_chunk`` + loss: Literal["bce", "mse"] = None, # deprecated + latency: float = 0.0, + + ): + super().__init__( + protocol, + duration=duration, + warm_up=warm_up, + batch_size=batch_size, + num_workers=num_workers, + pin_memory=pin_memory, + augmentation=augmentation, + metric=metric, + ) + + if not isinstance(protocol, SpeakerDiarizationProtocol): + raise ValueError( + "SpeakerDiarization task requires a SpeakerDiarizationProtocol." + ) + + # deprecation warnings + if max_speakers_per_chunk is None and max_num_speakers is not None: + max_speakers_per_chunk = max_num_speakers + warnings.warn( + "`max_num_speakers` has been deprecated in favor of `max_speakers_per_chunk`." + ) + if loss is not None: + warnings.warn("`loss` has been deprecated and has no effect.") + + # parameter validation + if max_speakers_per_frame is not None: + if max_speakers_per_frame < 1: + raise ValueError( + f"`max_speakers_per_frame` must be 1 or more (you used {max_speakers_per_frame})." 
+ ) + if vad_loss is not None: + raise ValueError( + "`vad_loss` cannot be used jointly with `max_speakers_per_frame`" + ) + + self.max_speakers_per_chunk = max_speakers_per_chunk + self.max_speakers_per_frame = max_speakers_per_frame + self.weigh_by_cardinality = weigh_by_cardinality + self.balance = balance + self.weight = weight + self.vad_loss = vad_loss + self.latency=latency + + + def setup(self): + super().setup() + + # estimate maximum number of speakers per chunk when not provided + if self.max_speakers_per_chunk is None: + training = self.metadata["subset"] == Subsets.index("train") + + num_unique_speakers = [] + progress_description = f"Estimating maximum number of speakers per {self.duration:g}s chunk in the training set" + for file_id in track( + np.where(training)[0], description=progress_description + ): + annotations = self.annotations[ + np.where(self.annotations["file_id"] == file_id)[0] + ] + annotated_regions = self.annotated_regions[ + np.where(self.annotated_regions["file_id"] == file_id)[0] + ] + for region in annotated_regions: + # find annotations within current region + region_start = region["start"] + region_end = region["end"] + region_annotations = annotations[ + np.where( + (annotations["start"] >= region_start) + * (annotations["end"] <= region_end) + )[0] + ] + + for window_start in np.arange( + region_start, region_end - self.duration, 0.25 * self.duration + ): + window_end = window_start + self.duration + window_annotations = region_annotations[ + np.where( + (region_annotations["start"] <= window_end) + * (region_annotations["end"] >= window_start) + )[0] + ] + num_unique_speakers.append( + len(np.unique(window_annotations["file_label_idx"])) + ) + + # because there might a few outliers, estimate the upper bound for the + # number of speakers as the 97th percentile + + num_speakers, counts = zip(*list(Counter(num_unique_speakers).items())) + num_speakers, counts = np.array(num_speakers), np.array(counts) + + sorting_indices = np.argsort(num_speakers) + num_speakers = num_speakers[sorting_indices] + counts = counts[sorting_indices] + + ratios = np.cumsum(counts) / np.sum(counts) + + for k, ratio in zip(num_speakers, ratios): + if k == 0: + print(f" - {ratio:7.2%} of all chunks contain no speech at all.") + elif k == 1: + print(f" - {ratio:7.2%} contain 1 speaker or less") + else: + print(f" - {ratio:7.2%} contain {k} speakers or less") + + self.max_speakers_per_chunk = max( + 2, + num_speakers[np.where(ratios > 0.97)[0][0]], + ) + + print( + f"Setting `max_speakers_per_chunk` to {self.max_speakers_per_chunk}. " + f"You can override this value (or avoid this estimation step) by passing `max_speakers_per_chunk={self.max_speakers_per_chunk}` to the task constructor." 
+ ) + + if ( + self.max_speakers_per_frame is not None + and self.max_speakers_per_frame > self.max_speakers_per_chunk + ): + raise ValueError( + f"`max_speakers_per_frame` ({self.max_speakers_per_frame}) must be smaller " + f"than `max_speakers_per_chunk` ({self.max_speakers_per_chunk})" + ) + + # now that we know about the number of speakers upper bound + # we can set task specifications + self.specifications = Specifications( + problem=Problem.MULTI_LABEL_CLASSIFICATION + if self.max_speakers_per_frame is None + else Problem.MONO_LABEL_CLASSIFICATION, + resolution=Resolution.FRAME, + duration=self.duration, + min_duration=self.min_duration, + warm_up=self.warm_up, + classes=[f"speaker#{i+1}" for i in range(self.max_speakers_per_chunk)], + powerset_max_classes=self.max_speakers_per_frame, + permutation_invariant=True, + ) + + def setup_loss_func(self): + if self.specifications.powerset: + self.model.powerset = Powerset( + len(self.specifications.classes), + self.specifications.powerset_max_classes, + ) + + def prepare_chunk(self, file_id: int, start_time: float, duration: float): + """Prepare chunk + + Parameters + ---------- + file_id : int + File index + start_time : float + Chunk start time + duration : float + Chunk duration. + + Returns + ------- + sample : dict + Dictionary containing the chunk data with the following keys: + - `X`: waveform + - `y`: target as a SlidingWindowFeature instance where y.labels is + in meta.scope space. + - `meta`: + - `scope`: target scope (0: file, 1: database, 2: global) + - `database`: database index + - `file`: file index + """ + + file = self.get_file(file_id) + + # get label scope + label_scope = Scopes[self.metadata[file_id]["scope"]] + label_scope_key = f"{label_scope}_label_idx" + + # + chunk = Segment(start_time, start_time + duration) + + sample = dict() + sample["X"], _ = self.model.audio.crop(file, chunk, duration=duration) + + # gather all annotations of current file + annotations = self.annotations[self.annotations["file_id"] == file_id] + + # gather all annotations with non-empty intersection with current chunk + chunk_annotations = annotations[ + (annotations["start"] < chunk.end) & (annotations["end"] > chunk.start) + ] + + # discretize chunk annotations at model output resolution + start = np.maximum(chunk_annotations["start"], chunk.start) - chunk.start + start_idx = np.floor(start / self.model.example_output.frames.step).astype(int) + end = np.minimum(chunk_annotations["end"], chunk.end) - chunk.start + end_idx = np.ceil(end / self.model.example_output.frames.step).astype(int) + + # get list and number of labels for current scope + labels = list(np.unique(chunk_annotations[label_scope_key])) + num_labels = len(labels) + + if num_labels > self.max_speakers_per_chunk: + pass + + # initial frame-level targets + y = np.zeros((self.model.example_output.num_frames, num_labels), dtype=np.uint8) + + # map labels to indices + mapping = {label: idx for idx, label in enumerate(labels)} + + for start, end, label in zip( + start_idx, end_idx, chunk_annotations[label_scope_key] + ): + mapped_label = mapping[label] + y[start:end, mapped_label] = 1 + + sample["y"] = SlidingWindowFeature( + y, self.model.example_output.frames, labels=labels + ) + + metadata = self.metadata[file_id] + sample["meta"] = {key: metadata[key] for key in metadata.dtype.names} + sample["meta"]["file"] = file_id + + return sample + + def collate_y(self, batch) -> torch.Tensor: + """ + + Parameters + ---------- + batch : list + List of samples to collate. 
+ "y" field is expected to be a SlidingWindowFeature. + + Returns + ------- + y : torch.Tensor + Collated target tensor of shape (num_frames, self.max_speakers_per_chunk) + If one chunk has more than `self.max_speakers_per_chunk` speakers, we keep + the max_speakers_per_chunk most talkative ones. If it has less, we pad with + zeros (artificial inactive speakers). + """ + + collated_y = [] + for b in batch: + y = b["y"].data + num_speakers = len(b["y"].labels) + if num_speakers > self.max_speakers_per_chunk: + # sort speakers in descending talkativeness order + indices = np.argsort(-np.sum(y, axis=0), axis=0) + # keep only the most talkative speakers + y = y[:, indices[: self.max_speakers_per_chunk]] + + # TODO: we should also sort the speaker labels in the same way + + elif num_speakers < self.max_speakers_per_chunk: + # create inactive speakers by zero padding + y = np.pad( + y, + ((0, 0), (0, self.max_speakers_per_chunk - num_speakers)), + mode="constant", + ) + + else: + # we have exactly the right number of speakers + pass + + collated_y.append(y) + + return torch.from_numpy(np.stack(collated_y)) + + def segmentation_loss( + self, + permutated_prediction: torch.Tensor, + target: torch.Tensor, + weight: torch.Tensor = None, + ) -> torch.Tensor: + """Permutation-invariant segmentation loss + + Parameters + ---------- + permutated_prediction : (batch_size, num_frames, num_classes) torch.Tensor + Permutated speaker activity predictions. + target : (batch_size, num_frames, num_speakers) torch.Tensor + Speaker activity. + weight : (batch_size, num_frames, 1) torch.Tensor, optional + Frames weight. + + Returns + ------- + seg_loss : torch.Tensor + Permutation-invariant segmentation loss + """ + if self.specifications.powerset: + # `clamp_min` is needed to set non-speech weight to 1. + class_weight = ( + torch.clamp_min(self.model.powerset.cardinality, 1.0) + if self.weigh_by_cardinality + else None + ) + + seg_loss = nll_loss( + permutated_prediction, + torch.argmax(target, dim=-1), + class_weight=class_weight, + weight=weight, + ) + else: + seg_loss = binary_cross_entropy( + permutated_prediction, target.float(), weight=weight + ) + + return seg_loss + + def voice_activity_detection_loss( + self, + permutated_prediction: torch.Tensor, + target: torch.Tensor, + weight: torch.Tensor = None, + ) -> torch.Tensor: + """Voice activity detection loss + + Parameters + ---------- + permutated_prediction : (batch_size, num_frames, num_classes) torch.Tensor + Speaker activity predictions. + target : (batch_size, num_frames, num_speakers) torch.Tensor + Speaker activity. + weight : (batch_size, num_frames, 1) torch.Tensor, optional + Frames weight. + + Returns + ------- + vad_loss : torch.Tensor + Voice activity detection loss. + """ + + vad_prediction, _ = torch.max(permutated_prediction, dim=2, keepdim=True) + # (batch_size, num_frames, 1) + + vad_target, _ = torch.max(target.float(), dim=2, keepdim=False) + # (batch_size, num_frames) + + if self.vad_loss == "bce": + loss = binary_cross_entropy(vad_prediction, vad_target, weight=weight) + + elif self.vad_loss == "mse": + loss = mse_loss(vad_prediction, vad_target, weight=weight) + + return loss + + def training_step(self, batch, batch_idx: int): + """Compute permutation-invariant segmentation loss + + Parameters + ---------- + batch : (usually) dict of torch.Tensor + Current batch. + batch_idx: int + Batch index. 
+ + Returns + ------- + loss : {str: torch.tensor} + {"loss": loss} + """ + + # target + target = batch["y"] + # (batch_size, num_frames, num_speakers) + + waveform = batch["X"] + # (batch_size, num_channels, num_samples) + + # drop samples that contain too many speakers + num_speakers: torch.Tensor = torch.sum(torch.any(target, dim=1), dim=1) + keep: torch.Tensor = num_speakers <= self.max_speakers_per_chunk + target = target[keep] + waveform = waveform[keep] + + # corner case + if not keep.any(): + return None + + # forward pass + prediction = self.model(waveform) + batch_size, num_frames, _ = prediction.shape + # (batch_size, num_frames, num_classes) + + # frames weight + weight_key = getattr(self, "weight", None) + weight = batch.get( + weight_key, + torch.ones(batch_size, num_frames, 1, device=self.model.device), + ) + # (batch_size, num_frames, 1) + + # warm-up + warm_up_left = round(self.warm_up[0] / self.duration * num_frames) + weight[:, :warm_up_left] = 0.0 + warm_up_right = round(self.warm_up[1] / self.duration * num_frames) + weight[:, num_frames - warm_up_right :] = 0.0 + + delay = int(np.floor(num_frames * self.latency / self.duration)) # round down + + prediction = prediction[:, delay:, :] + target = target[:, :num_frames-delay, :] + + if self.specifications.powerset: + multilabel = self.model.powerset.to_multilabel(prediction) + permutated_target, _ = permutate(multilabel, target) + permutated_target_powerset = self.model.powerset.to_powerset( + permutated_target.float() + ) + seg_loss = self.segmentation_loss( + prediction, permutated_target_powerset, weight=weight + ) + + else: + permutated_prediction, _ = permutate(target, prediction) + seg_loss = self.segmentation_loss( + permutated_prediction, target, weight=weight + ) + + self.model.log( + "loss/train/segmentation", + seg_loss, + on_step=False, + on_epoch=True, + prog_bar=False, + logger=True, + ) + + if self.vad_loss is None: + vad_loss = 0.0 + + else: + # TODO: vad_loss probably does not make sense in powerset mode + # because first class (empty set of labels) does exactly this... 
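For reference, the `delay` slicing applied to `prediction` and `target` earlier in this `training_step` (and again in `validation_step` further down) works as in the following minimal sketch. It is an illustration only, not part of the patch: the chunk duration, frame count and latency values are made up.

```python
import numpy as np

# made-up chunk setup: 5 s chunks, 293 output frames, 0.5 s latency
num_frames, duration, latency = 293, 5.0, 0.5
delay = int(np.floor(num_frames * latency / duration))  # -> 29 frames

batch_size, num_speakers = 4, 3
prediction = np.random.rand(batch_size, num_frames, num_speakers)
target = np.random.randint(0, 2, (batch_size, num_frames, num_speakers))

# same slicing as above: the prediction emitted at frame t is scored against
# the target at frame t - delay, i.e. the model gets `latency` seconds of
# lookahead before committing to a frame.
shifted_prediction = prediction[:, delay:, :]
shifted_target = target[:, : num_frames - delay, :]
assert shifted_prediction.shape == shifted_target.shape == (4, 264, 3)
```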
+ if self.specifications.powerset: + vad_loss = self.voice_activity_detection_loss( + prediction, permutated_target_powerset, weight=weight + ) + + else: + vad_loss = self.voice_activity_detection_loss( + permutated_prediction, target, weight=weight + ) + + self.model.log( + "loss/train/vad", + vad_loss, + on_step=False, + on_epoch=True, + prog_bar=False, + logger=True, + ) + + loss = seg_loss + vad_loss + + # skip batch if something went wrong for some reason + if torch.isnan(loss): + return None + + self.model.log( + "loss/train", + loss, + on_step=False, + on_epoch=True, + prog_bar=False, + logger=True, + ) + + return {"loss": loss} + + def default_metric( + self, + ) -> Union[Metric, Sequence[Metric], Dict[str, Metric]]: + """Returns diarization error rate and its components""" + + if self.specifications.powerset: + return { + "DiarizationErrorRate": DiarizationErrorRate(0.5), + "DiarizationErrorRate/Confusion": SpeakerConfusionRate(0.5), + "DiarizationErrorRate/Miss": MissedDetectionRate(0.5), + "DiarizationErrorRate/FalseAlarm": FalseAlarmRate(0.5), + } + + return { + "DiarizationErrorRate": OptimalDiarizationErrorRate(), + "DiarizationErrorRate/Threshold": OptimalDiarizationErrorRateThreshold(), + "DiarizationErrorRate/Confusion": OptimalSpeakerConfusionRate(), + "DiarizationErrorRate/Miss": OptimalMissedDetectionRate(), + "DiarizationErrorRate/FalseAlarm": OptimalFalseAlarmRate(), + } + + # TODO: no need to compute gradient in this method + def validation_step(self, batch, batch_idx: int): + """Compute validation loss and metric + + Parameters + ---------- + batch : dict of torch.Tensor + Current batch. + batch_idx: int + Batch index. + """ + + # target + target = batch["y"] + # (batch_size, num_frames, num_speakers) + + waveform = batch["X"] + # (batch_size, num_channels, num_samples) + + # TODO: should we handle validation samples with too many speakers + # waveform = waveform[keep] + # target = target[keep] + + # forward pass + prediction = self.model(waveform) + batch_size, num_frames, _ = prediction.shape + + # frames weight + weight_key = getattr(self, "weight", None) + weight = batch.get( + weight_key, + torch.ones(batch_size, num_frames, 1, device=self.model.device), + ) + # (batch_size, num_frames, 1) + + # warm-up + warm_up_left = round(self.warm_up[0] / self.duration * num_frames) + weight[:, :warm_up_left] = 0.0 + warm_up_right = round(self.warm_up[1] / self.duration * num_frames) + weight[:, num_frames - warm_up_right :] = 0.0 + + delay = int(np.floor(num_frames * self.latency / self.duration)) # round down + + prediction = prediction[:, delay:, :] + target = target[:, :num_frames-delay, :] + + + if self.specifications.powerset: + multilabel = self.model.powerset.to_multilabel(prediction) + permutated_target, _ = permutate(multilabel, target) + + # FIXME: handle case where target have too many speakers? 
+ # since we don't need + permutated_target_powerset = self.model.powerset.to_powerset( + permutated_target.float() + ) + seg_loss = self.segmentation_loss( + prediction, permutated_target_powerset, weight=weight + ) + + else: + permutated_prediction, _ = permutate(target, prediction) + seg_loss = self.segmentation_loss( + permutated_prediction, target, weight=weight + ) + + self.model.log( + "loss/val/segmentation", + seg_loss, + on_step=False, + on_epoch=True, + prog_bar=False, + logger=True, + ) + + if self.vad_loss is None: + vad_loss = 0.0 + + else: + # TODO: vad_loss probably does not make sense in powerset mode + # because first class (empty set of labels) does exactly this... + if self.specifications.powerset: + vad_loss = self.voice_activity_detection_loss( + prediction, permutated_target_powerset, weight=weight + ) + + else: + vad_loss = self.voice_activity_detection_loss( + permutated_prediction, target, weight=weight + ) + + self.model.log( + "loss/val/vad", + vad_loss, + on_step=False, + on_epoch=True, + prog_bar=False, + logger=True, + ) + + loss = seg_loss + vad_loss + + self.model.log( + "loss/val", + loss, + on_step=False, + on_epoch=True, + prog_bar=False, + logger=True, + ) + + if self.specifications.powerset: + self.model.validation_metric( + torch.transpose( + multilabel[:, warm_up_left : num_frames - warm_up_right], 1, 2 + ), + torch.transpose( + target[:, warm_up_left : num_frames - warm_up_right], 1, 2 + ), + ) + else: + self.model.validation_metric( + torch.transpose( + prediction[:, warm_up_left : num_frames - warm_up_right], 1, 2 + ), + torch.transpose( + target[:, warm_up_left : num_frames - warm_up_right], 1, 2 + ), + ) + + self.model.log_dict( + self.model.validation_metric, + on_step=False, + on_epoch=True, + prog_bar=True, + logger=True, + ) + + # log first batch visualization every 2^n epochs. 
+ if ( + self.model.current_epoch == 0 + or math.log2(self.model.current_epoch) % 1 > 0 + or batch_idx > 0 + ): + return + + # visualize first 9 validation samples of first batch in Tensorboard/MLflow + + if self.specifications.powerset: + y = permutated_target.float().cpu().numpy() + y_pred = multilabel.cpu().numpy() + else: + y = target.float().cpu().numpy() + y_pred = permutated_prediction.cpu().numpy() + + # prepare 3 x 3 grid (or smaller if batch size is smaller) + num_samples = min(self.batch_size, 9) + nrows = math.ceil(math.sqrt(num_samples)) + ncols = math.ceil(num_samples / nrows) + fig, axes = plt.subplots( + nrows=2 * nrows, ncols=ncols, figsize=(8, 5), squeeze=False + ) + + # reshape target so that there is one line per class when plotting it + y[y == 0] = np.NaN + if len(y.shape) == 2: + y = y[:, :, np.newaxis] + y *= np.arange(y.shape[2]) + + # plot each sample + for sample_idx in range(num_samples): + # find where in the grid it should be plotted + row_idx = sample_idx // nrows + col_idx = sample_idx % ncols + + # plot target + ax_ref = axes[row_idx * 2 + 0, col_idx] + sample_y = y[sample_idx] + ax_ref.plot(sample_y) + ax_ref.set_xlim(0, len(sample_y)) + ax_ref.set_ylim(-1, sample_y.shape[1]) + ax_ref.get_xaxis().set_visible(False) + ax_ref.get_yaxis().set_visible(False) + + # plot predictions + ax_hyp = axes[row_idx * 2 + 1, col_idx] + sample_y_pred = y_pred[sample_idx] + ax_hyp.axvspan(0, warm_up_left, color="k", alpha=0.5, lw=0) + ax_hyp.axvspan( + num_frames - warm_up_right, num_frames, color="k", alpha=0.5, lw=0 + ) + ax_hyp.plot(sample_y_pred) + ax_hyp.set_ylim(-0.1, 1.1) + ax_hyp.set_xlim(0, len(sample_y)) + ax_hyp.get_xaxis().set_visible(False) + + plt.tight_layout() + + for logger in self.model.loggers: + if isinstance(logger, TensorBoardLogger): + logger.experiment.add_figure("samples", fig, self.model.current_epoch) + elif isinstance(logger, MLFlowLogger): + logger.experiment.log_figure( + run_id=logger.run_id, + figure=fig, + artifact_file=f"samples_epoch{self.model.current_epoch}.png", + ) + + plt.close(fig) + + +def main(protocol: str, subset: str = "test", model: str = "pyannote/segmentation"): + """Evaluate a segmentation model""" + + from pyannote.database import FileFinder, get_protocol + from rich.progress import Progress + + from pyannote.audio import Inference + from pyannote.audio.pipelines.utils import get_devices + from pyannote.audio.utils.metric import DiscreteDiarizationErrorRate + from pyannote.audio.utils.signal import binarize + + (device,) = get_devices(needs=1) + metric = DiscreteDiarizationErrorRate() + protocol = get_protocol(protocol, preprocessors={"audio": FileFinder()}) + files = list(getattr(protocol, subset)()) + + with Progress() as progress: + main_task = progress.add_task(protocol.name, total=len(files)) + file_task = progress.add_task("Processing", total=1.0) + + def progress_hook(completed: int = None, total: int = None): + progress.update(file_task, completed=completed / total) + + inference = Inference(model, device=device) + + for file in files: + progress.update(file_task, description=file["uri"]) + reference = file["annotation"] + hypothesis = binarize(inference(file, hook=progress_hook)) + uem = file["annotated"] + _ = metric(reference, hypothesis, uem=uem) + progress.advance(main_task) + + _ = metric.report(display=True) + + +if __name__ == "__main__": + import typer + + typer.run(main) From 1fd6e604d31729b1ee7e1ed2c7ed9eea4a16a393 Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Thu, 30 Nov 2023 15:45:56 +0100 Subject: 
[PATCH 08/23] Create StreamingSpeakerDiarization pipeline to use models with latencies in a pipeline --- pyannote/audio/pipelines/__init__.py | 2 + .../streaming_speaker_diarization.py | 621 ++++++++++++++++++ pyannote/audio/tasks/__init__.py | 2 + 3 files changed, 625 insertions(+) create mode 100644 pyannote/audio/pipelines/streaming_speaker_diarization.py diff --git a/pyannote/audio/pipelines/__init__.py b/pyannote/audio/pipelines/__init__.py index 0c7d2f25c..06554ac4e 100644 --- a/pyannote/audio/pipelines/__init__.py +++ b/pyannote/audio/pipelines/__init__.py @@ -24,6 +24,7 @@ from .overlapped_speech_detection import OverlappedSpeechDetection from .resegmentation import Resegmentation from .speaker_diarization import SpeakerDiarization +from .streaming_speaker_diarization import StreamingSpeakerDiarization from .voice_activity_detection import VoiceActivityDetection __all__ = [ @@ -32,4 +33,5 @@ "SpeakerDiarization", "Resegmentation", "MultiLabelSegmentation", + "StreamingSpeakerDiarization", ] diff --git a/pyannote/audio/pipelines/streaming_speaker_diarization.py b/pyannote/audio/pipelines/streaming_speaker_diarization.py new file mode 100644 index 000000000..98ed528eb --- /dev/null +++ b/pyannote/audio/pipelines/streaming_speaker_diarization.py @@ -0,0 +1,621 @@ +# The MIT License (MIT) +# +# Copyright (c) 2021- CNRS +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
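Before the implementation below, a rough usage sketch of the pipeline this patch adds. The checkpoint name, the embedding model and the 0.5 s latency are placeholders chosen for illustration, not values mandated by the patch; the only constraint suggested by the code is that `latency` matches what the segmentation model was trained with.

```python
from pyannote.audio.pipelines import StreamingSpeakerDiarization

# "streaming_segmentation.ckpt" is a placeholder for a segmentation checkpoint
# trained with the StreamingSpeakerDiarization task from the previous patch;
# `latency` is expected to match the latency used at training time.
pipeline = StreamingSpeakerDiarization(
    segmentation="streaming_segmentation.ckpt",
    embedding="speechbrain/spkrec-ecapa-voxceleb",
    latency=0.5,
)

# as with any pyannote Pipeline, hyper-parameters (segmentation.min_duration_off,
# clustering, ...) still have to be set with pipeline.instantiate(...) before
# applying it, e.g. diarization = pipeline("audio.wav", num_speakers=2)
```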
+ +"""Speaker diarization pipelines""" + +import functools +import itertools +import math +from typing import Callable, Optional, Text, Union + +import numpy as np +import torch +from einops import rearrange +from pyannote.core import Annotation, SlidingWindow, SlidingWindowFeature +from pyannote.metrics.diarization import GreedyDiarizationErrorRate +from pyannote.pipeline.parameter import ParamDict, Uniform + +from pyannote.audio import Audio, Inference, Model, Pipeline +from pyannote.audio.core.io import AudioFile +from pyannote.audio.pipelines.clustering import Clustering +from pyannote.audio.pipelines.speaker_verification import PretrainedSpeakerEmbedding +from pyannote.audio.pipelines.utils import ( + PipelineModel, + SpeakerDiarizationMixin, + get_model, +) +from pyannote.audio.utils.signal import binarize + + +def batchify(iterable, batch_size: int = 32, fillvalue=None): + """Batchify iterable""" + # batchify('ABCDEFG', 3) --> ['A', 'B', 'C'] ['D', 'E', 'F'] [G, ] + args = [iter(iterable)] * batch_size + return itertools.zip_longest(*args, fillvalue=fillvalue) + + +class StreamingSpeakerDiarization(SpeakerDiarizationMixin, Pipeline): + """Speaker diarization pipeline + + Parameters + ---------- + segmentation : Model, str, or dict, optional + Pretrained segmentation model. Defaults to "pyannote/segmentation@2022.07". + See pyannote.audio.pipelines.utils.get_model for supported format. + segmentation_step: float, optional + The segmentation model is applied on a window sliding over the whole audio file. + `segmentation_step` controls the step of this window, provided as a ratio of its + duration. Defaults to 0.1 (i.e. 90% overlap between two consecuive windows). + embedding : Model, str, or dict, optional + Pretrained embedding model. Defaults to "pyannote/embedding@2022.07". + See pyannote.audio.pipelines.utils.get_model for supported format. + embedding_exclude_overlap : bool, optional + Exclude overlapping speech regions when extracting embeddings. + Defaults (False) to use the whole speech. + clustering : str, optional + Clustering algorithm. See pyannote.audio.pipelines.clustering.Clustering + for available options. Defaults to "AgglomerativeClustering". + segmentation_batch_size : int, optional + Batch size used for speaker segmentation. Defaults to 1. + embedding_batch_size : int, optional + Batch size used for speaker embedding. Defaults to 1. + der_variant : dict, optional + Optimize for a variant of diarization error rate. + Defaults to {"collar": 0.0, "skip_overlap": False}. This is used in `get_metric` + when instantiating the metric: GreedyDiarizationErrorRate(**der_variant). + use_auth_token : str, optional + When loading private huggingface.co models, set `use_auth_token` + to True or to a string containing your hugginface.co authentication + token that can be obtained by running `huggingface-cli login` + + Usage + ----- + # perform (unconstrained) diarization + >>> diarization = pipeline("/path/to/audio.wav") + + # perform diarization, targetting exactly 4 speakers + >>> diarization = pipeline("/path/to/audio.wav", num_speakers=4) + + # perform diarization, with at least 2 speakers and at most 10 speakers + >>> diarization = pipeline("/path/to/audio.wav", min_speakers=2, max_speakers=10) + + # perform diarization and get one representative embedding per speaker + >>> diarization, embeddings = pipeline("/path/to/audio.wav", return_embeddings=True) + >>> for s, speaker in enumerate(diarization.labels()): + ... 
# embeddings[s] is the embedding of speaker `speaker` + + Hyper-parameters + ---------------- + segmentation.threshold + segmentation.min_duration_off + clustering.??? + """ + + def __init__( + self, + segmentation: PipelineModel = "pyannote/segmentation@2022.07", + segmentation_step: float = 0.1, + embedding: PipelineModel = "speechbrain/spkrec-ecapa-voxceleb@5c0be3875fda05e81f3c004ed8c7c06be308de1e", + embedding_exclude_overlap: bool = False, + clustering: str = "AgglomerativeClustering", + embedding_batch_size: int = 1, + segmentation_batch_size: int = 1, + der_variant: dict = None, + use_auth_token: Union[Text, None] = None, + latency: float = 0.0, + ): + super().__init__() + self.latency = latency + self.segmentation_model = segmentation + model: Model = get_model(segmentation, use_auth_token=use_auth_token) + + self.segmentation_step = segmentation_step + + self.embedding = embedding + self.embedding_batch_size = embedding_batch_size + self.embedding_exclude_overlap = embedding_exclude_overlap + + self.klustering = clustering + + self.der_variant = der_variant or {"collar": 0.0, "skip_overlap": False} + + segmentation_duration = model.specifications.duration + self._segmentation = Inference( + model, + duration=segmentation_duration, + step=self.segmentation_step * segmentation_duration, + skip_aggregation=True, + batch_size=segmentation_batch_size, + ) + self._frames: SlidingWindow = self._segmentation.model.example_output.frames + + if self._segmentation.model.specifications.powerset: + self.segmentation = ParamDict( + min_duration_off=Uniform(0.0, 1.0), + ) + + else: + self.segmentation = ParamDict( + threshold=Uniform(0.1, 0.9), + min_duration_off=Uniform(0.0, 1.0), + ) + + if self.klustering == "OracleClustering": + metric = "not_applicable" + + else: + self._embedding = PretrainedSpeakerEmbedding( + self.embedding, use_auth_token=use_auth_token + ) + self._audio = Audio(sample_rate=self._embedding.sample_rate, mono="downmix") + metric = self._embedding.metric + + try: + Klustering = Clustering[clustering] + except KeyError: + raise ValueError( + f'clustering must be one of [{", ".join(list(Clustering.__members__))}]' + ) + self.clustering = Klustering.value(metric=metric) + + @property + def segmentation_batch_size(self) -> int: + return self._segmentation.batch_size + + @segmentation_batch_size.setter + def segmentation_batch_size(self, batch_size: int): + self._segmentation.batch_size = batch_size + + def default_parameters(self): + raise NotImplementedError() + + def classes(self): + speaker = 0 + while True: + yield f"SPEAKER_{speaker:02d}" + speaker += 1 + + @property + def CACHED_SEGMENTATION(self): + return "training_cache/segmentation" + + def get_segmentations(self, file, hook=None) -> SlidingWindowFeature: + """Apply segmentation model + + Parameter + --------- + file : AudioFile + hook : Optional[Callable] + + Returns + ------- + segmentations : (num_chunks, num_frames, num_speakers) SlidingWindowFeature + """ + + if hook is not None: + hook = functools.partial(hook, "segmentation", None) + + if self.training: + if self.CACHED_SEGMENTATION in file: + segmentations = file[self.CACHED_SEGMENTATION] + else: + segmentations = self._segmentation(file, hook=hook) + file[self.CACHED_SEGMENTATION] = segmentations + else: + segmentations: SlidingWindowFeature = self._segmentation(file, hook=hook) + + return segmentations + + def get_embeddings( + self, + file, + binary_segmentations: SlidingWindowFeature, + exclude_overlap: bool = False, + hook: Optional[Callable] = None, + 
): + """Extract embeddings for each (chunk, speaker) pair + + Parameters + ---------- + file : AudioFile + binary_segmentations : (num_chunks, num_frames, num_speakers) SlidingWindowFeature + Binarized segmentation. + exclude_overlap : bool, optional + Exclude overlapping speech regions when extracting embeddings. + In case non-overlapping speech is too short, use the whole speech. + hook: Optional[Callable] + Called during embeddings after every batch to report the progress + + Returns + ------- + embeddings : (num_chunks, num_speakers, dimension) array + """ + + # when optimizing the hyper-parameters of this pipeline with frozen + # "segmentation.threshold", one can reuse the embeddings from the first trial, + # bringing a massive speed up to the optimization process (and hence allowing to use + # a larger search space). + if self.training: + # we only re-use embeddings if they were extracted based on the same value of the + # "segmentation.threshold" hyperparameter or if the segmentation model relies on + # `powerset` mode + cache = file.get("training_cache/embeddings", dict()) + if ("embeddings" in cache) and ( + self._segmentation.model.specifications.powerset + or (cache["segmentation.threshold"] == self.segmentation.threshold) + ): + return cache["embeddings"] + + duration = binary_segmentations.sliding_window.duration + num_chunks, num_frames, num_speakers = binary_segmentations.data.shape + + if exclude_overlap: + # minimum number of samples needed to extract an embedding + # (a lower number of samples would result in an error) + min_num_samples = self._embedding.min_num_samples + + # corresponding minimum number of frames + num_samples = duration * self._embedding.sample_rate + min_num_frames = math.ceil(num_frames * min_num_samples / num_samples) + + # zero-out frames with overlapping speech + clean_frames = 1.0 * ( + np.sum(binary_segmentations.data, axis=2, keepdims=True) < 2 + ) + clean_segmentations = SlidingWindowFeature( + binary_segmentations.data * clean_frames, + binary_segmentations.sliding_window, + ) + + else: + min_num_frames = -1 + clean_segmentations = SlidingWindowFeature( + binary_segmentations.data, binary_segmentations.sliding_window + ) + + def iter_waveform_and_mask(): + for (chunk, masks), (_, clean_masks) in zip( + binary_segmentations, clean_segmentations + ): + # chunk: Segment(t, t + duration) + # masks: (num_frames, local_num_speakers) np.ndarray + + waveform, _ = self._audio.crop( + file, + chunk, + duration=duration, + mode="pad", + ) + # waveform: (1, num_samples) torch.Tensor + + # mask may contain NaN (in case of partial stitching) + masks = np.nan_to_num(masks, nan=0.0).astype(np.float32) + clean_masks = np.nan_to_num(clean_masks, nan=0.0).astype(np.float32) + + for mask, clean_mask in zip(masks.T, clean_masks.T): + # mask: (num_frames, ) np.ndarray + + if np.sum(clean_mask) > min_num_frames: + used_mask = clean_mask + else: + used_mask = mask + + yield waveform[None], torch.from_numpy(used_mask)[None] + # w: (1, 1, num_samples) torch.Tensor + # m: (1, num_frames) torch.Tensor + + batches = batchify( + iter_waveform_and_mask(), + batch_size=self.embedding_batch_size, + fillvalue=(None, None), + ) + + batch_count = math.ceil(num_chunks * num_speakers / self.embedding_batch_size) + + embedding_batches = [] + + if hook is not None: + hook("embeddings", None, total=batch_count, completed=0) + + for i, batch in enumerate(batches, 1): + waveforms, masks = zip(*filter(lambda b: b[0] is not None, batch)) + + waveform_batch = torch.vstack(waveforms) + # 
(batch_size, 1, num_samples) torch.Tensor + + mask_batch = torch.vstack(masks) + # (batch_size, num_frames) torch.Tensor + + embedding_batch: np.ndarray = self._embedding( + waveform_batch, masks=mask_batch + ) + # (batch_size, dimension) np.ndarray + + embedding_batches.append(embedding_batch) + + if hook is not None: + hook("embeddings", embedding_batch, total=batch_count, completed=i) + + embedding_batches = np.vstack(embedding_batches) + + embeddings = rearrange(embedding_batches, "(c s) d -> c s d", c=num_chunks) + + # caching embeddings for subsequent trials + # (see comments at the top of this method for more details) + if self.training: + if self._segmentation.model.specifications.powerset: + file["training_cache/embeddings"] = { + "embeddings": embeddings, + } + else: + file["training_cache/embeddings"] = { + "segmentation.threshold": self.segmentation.threshold, + "embeddings": embeddings, + } + + return embeddings + + def reconstruct( + self, + segmentations: SlidingWindowFeature, + hard_clusters: np.ndarray, + count: SlidingWindowFeature, + ) -> SlidingWindowFeature: + """Build final discrete diarization out of clustered segmentation + + Parameters + ---------- + segmentations : (num_chunks, num_frames, num_speakers) SlidingWindowFeature + Raw speaker segmentation. + hard_clusters : (num_chunks, num_speakers) array + Output of clustering step. + count : (total_num_frames, 1) SlidingWindowFeature + Instantaneous number of active speakers. + + Returns + ------- + discrete_diarization : SlidingWindowFeature + Discrete (0s and 1s) diarization. + """ + + num_chunks, num_frames, local_num_speakers = segmentations.data.shape + + num_clusters = np.max(hard_clusters) + 1 + clustered_segmentations = np.NAN * np.zeros( + (num_chunks, num_frames, num_clusters) + ) + + for c, (cluster, (chunk, segmentation)) in enumerate( + zip(hard_clusters, segmentations) + ): + # cluster is (local_num_speakers, )-shaped + # segmentation is (num_frames, local_num_speakers)-shaped + for k in np.unique(cluster): + if k == -2: + continue + + # TODO: can we do better than this max here? + clustered_segmentations[c, :, k] = np.max( + segmentation[:, cluster == k], axis=1 + ) + + clustered_segmentations = SlidingWindowFeature( + clustered_segmentations, segmentations.sliding_window + ) + + return self.to_diarization(clustered_segmentations, count) + + def apply( + self, + file: AudioFile, + num_speakers: int = None, + min_speakers: int = None, + max_speakers: int = None, + return_embeddings: bool = False, + hook: Optional[Callable] = None, + ) -> Annotation: + """Apply speaker diarization + + Parameters + ---------- + file : AudioFile + Processed file. + num_speakers : int, optional + Number of speakers, when known. + min_speakers : int, optional + Minimum number of speakers. Has no effect when `num_speakers` is provided. + max_speakers : int, optional + Maximum number of speakers. Has no effect when `num_speakers` is provided. + return_embeddings : bool, optional + Return representative speaker embeddings. + hook : callable, optional + Callback called after each major steps of the pipeline as follows: + hook(step_name, # human-readable name of current step + step_artefact, # artifact generated by current step + file=file) # file being processed + Time-consuming steps call `hook` multiple times with the same `step_name` + and additional `completed` and `total` keyword arguments usable to track + progress of current step. 
+ + Returns + ------- + diarization : Annotation + Speaker diarization + embeddings : np.array, optional + Representative speaker embeddings such that `embeddings[i]` is the + speaker embedding for i-th speaker in diarization.labels(). + Only returned when `return_embeddings` is True. + """ + + # setup hook (e.g. for debugging purposes) + hook = self.setup_hook(file, hook=hook) + + num_speakers, min_speakers, max_speakers = self.set_num_speakers( + num_speakers=num_speakers, + min_speakers=min_speakers, + max_speakers=max_speakers, + ) + + segmentations = self.get_segmentations(file, hook=hook) + + # change the sliding window and shift the data of segmentations according to latency + sliding_window = segmentations.sliding_window + new_sliding_window = SlidingWindow(start=sliding_window.start, end = sliding_window.end, step = sliding_window.step, duration=sliding_window.duration-self.latency) + segmentations.sliding_window = new_sliding_window + segmentations.data = segmentations.data[:,self._frames.closest_frame(self.latency):,:] + print(self._frames.closest_frame(self.latency)) + + hook("segmentation", segmentations) + # shape: (num_chunks, num_frames, local_num_speakers) + + # estimate frame-level number of instantaneous speakers + count = self.speaker_count( + segmentations, + onset=0.5 + if self._segmentation.model.specifications.powerset + else self.segmentation.threshold, + frames=self._frames, + warm_up=(0.0, 0.0), + ) + + print(count.data.shape) + + hook("speaker_counting", count) + # shape: (num_frames, 1) + # dtype: int + + # exit early when no speaker is ever active + if np.nanmax(count.data) == 0.0: + diarization = Annotation(uri=file["uri"]) + if return_embeddings: + return diarization, np.zeros((0, self._embedding.dimension)) + + return diarization + + # binarize segmentation + if self._segmentation.model.specifications.powerset: + binarized_segmentations = segmentations + else: + binarized_segmentations: SlidingWindowFeature = binarize( + segmentations, + onset=self.segmentation.threshold, + initial_state=False, + ) + + if self.klustering == "OracleClustering" and not return_embeddings: + embeddings = None + else: + print("before embedding") + embeddings = self.get_embeddings( + file, + binarized_segmentations, + exclude_overlap=self.embedding_exclude_overlap, + hook=hook, + ) + hook("embeddings", embeddings) + # shape: (num_chunks, local_num_speakers, dimension) + print("before clustering") + hard_clusters, _, centroids = self.clustering( + embeddings=embeddings, + segmentations=binarized_segmentations, + num_clusters=num_speakers, + min_clusters=min_speakers, + max_clusters=max_speakers, + file=file, # <== for oracle clustering + frames=self._frames, # <== for oracle clustering + ) + # hard_clusters: (num_chunks, num_speakers) + # centroids: (num_speakers, dimension) + + # reconstruct discrete diarization from raw hard clusters + + # keep track of inactive speakers + inactive_speakers = np.sum(binarized_segmentations.data, axis=1) == 0 + # shape: (num_chunks, num_speakers) + + hard_clusters[inactive_speakers] = -2 + + print("before reconstruct") + discrete_diarization = self.reconstruct( + segmentations, + hard_clusters, + count, + ) + hook("discrete_diarization", discrete_diarization) + + # convert to continuous diarization + diarization = self.to_annotation( + discrete_diarization, + min_duration_on=0.0, + min_duration_off=self.segmentation.min_duration_off, + ) + diarization.uri = file["uri"] + + # at this point, `diarization` speaker labels are integers + # from 0 to 
`num_speakers - 1`, aligned with `centroids` rows. + + if "annotation" in file and file["annotation"]: + # when reference is available, use it to map hypothesized speakers + # to reference speakers (this makes later error analysis easier + # but does not modify the actual output of the diarization pipeline) + _, mapping = self.optimal_mapping( + file["annotation"], diarization, return_mapping=True + ) + + # in case there are more speakers in the hypothesis than in + # the reference, those extra speakers are missing from `mapping`. + # we add them back here + mapping = {key: mapping.get(key, key) for key in diarization.labels()} + + else: + # when reference is not available, rename hypothesized speakers + # to human-readable SPEAKER_00, SPEAKER_01, ... + mapping = { + label: expected_label + for label, expected_label in zip(diarization.labels(), self.classes()) + } + + diarization = diarization.rename_labels(mapping=mapping) + + # at this point, `diarization` speaker labels are strings (or mix of + # strings and integers when reference is available and some hypothesis + # speakers are not present in the reference) + + if not return_embeddings: + return diarization + + # re-order centroids so that they match + # the order given by diarization.labels() + inverse_mapping = {label: index for index, label in mapping.items()} + centroids = centroids[ + [inverse_mapping[label] for label in diarization.labels()] + ] + + # FIXME: the number of centroids may be smaller than the number of speakers + # in the annotation. This can happen if the number of active speakers + # obtained from `speaker_count` for some frames is larger than the number + # of clusters obtained from `clustering`. Will be fixed in the future + + return diarization, centroids + + def get_metric(self) -> GreedyDiarizationErrorRate: + return GreedyDiarizationErrorRate(**self.der_variant) diff --git a/pyannote/audio/tasks/__init__.py b/pyannote/audio/tasks/__init__.py index 6cbba258f..c8483711a 100644 --- a/pyannote/audio/tasks/__init__.py +++ b/pyannote/audio/tasks/__init__.py @@ -22,6 +22,7 @@ from .segmentation.multilabel import MultiLabelSegmentation # isort:skip from .segmentation.speaker_diarization import SpeakerDiarization # isort:skip +from .segmentation.streaming_speaker_diarization import StreamingSpeakerDiarization # isort:skip from .segmentation.voice_activity_detection import VoiceActivityDetection # isort:skip from .segmentation.overlapped_speech_detection import ( # isort:skip OverlappedSpeechDetection, @@ -36,6 +37,7 @@ __all__ = [ "SpeakerDiarization", + "StreamingSpeakerDiarization", "VoiceActivityDetection", "OverlappedSpeechDetection", "MultiLabelSegmentation", From 0a874e8d030f8c1d9c3a34967d4c5e1f2da8cf18 Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Wed, 17 Jan 2024 09:44:49 +0100 Subject: [PATCH 09/23] implement multilatency model --- pyannote/audio/__init__.py | 3 +- pyannote/audio/core/inference.py | 92 +- .../segmentation/MultilatencyPyanNet.py | 310 ++++++ .../audio/models/segmentation/__init__.py | 6 +- pyannote/audio/tasks/__init__.py | 3 + ...tilatency_streaming_speaker_diarization.py | 921 ++++++++++++++++++ 6 files changed, 1330 insertions(+), 5 deletions(-) create mode 100644 pyannote/audio/models/segmentation/MultilatencyPyanNet.py create mode 100644 pyannote/audio/tasks/segmentation/multilatency_streaming_speaker_diarization.py diff --git a/pyannote/audio/__init__.py b/pyannote/audio/__init__.py index 462a15d77..004f5da32 100644 --- a/pyannote/audio/__init__.py +++ b/pyannote/audio/__init__.py 
@@ -27,8 +27,9 @@ from .core.inference import Inference +from .core.guided_inference import GuidedInference from .core.io import Audio from .core.model import Model from .core.pipeline import Pipeline -__all__ = ["Audio", "Model", "Inference", "Pipeline"] +__all__ = ["Audio", "Model", "Inference", "Pipeline", "GuidedInference"] diff --git a/pyannote/audio/core/inference.py b/pyannote/audio/core/inference.py index dcf21868d..50da27a02 100644 --- a/pyannote/audio/core/inference.py +++ b/pyannote/audio/core/inference.py @@ -94,6 +94,7 @@ def __init__( device: torch.device = None, batch_size: int = 32, use_auth_token: Union[Text, None] = None, + latency_index: int = None, ): # ~~~~ model ~~~~~ @@ -118,7 +119,6 @@ def __init__( specifications = self.model.specifications # ~~~~ sliding window ~~~~~ - if window not in ["sliding", "whole"]: raise ValueError('`window` must be "sliding" or "whole".') @@ -139,6 +139,7 @@ def __init__( f"{duration:g}s chunks for inference: this might lead to suboptimal results." ) self.duration = duration + self.latency_index = latency_index # ~~~~ powerset to multilabel conversion ~~~~ @@ -227,6 +228,10 @@ def infer(self, chunks: torch.Tensor) -> Union[np.ndarray, Tuple[np.ndarray]]: def __convert(output: torch.Tensor, conversion: nn.Module, **kwargs): return conversion(output).cpu().numpy() + if self.latency_index is not None: + return map_with_specifications( + self.model.specifications, __convert, outputs[self.latency_index], self.conversion) + return map_with_specifications( self.model.specifications, __convert, outputs, self.conversion ) @@ -605,6 +610,7 @@ def aggregate( ) + 1 ) + aggregated_output: np.ndarray = np.zeros( (num_frames, num_classes), dtype=np.float32 ) @@ -620,14 +626,14 @@ def aggregate( aggregated_mask: np.ndarray = np.zeros( (num_frames, num_classes), dtype=np.float32 ) - # loop on the scores of sliding chunks for (chunk, score), (_, mask) in zip(scores, masks): # chunk ~ Segment # score ~ (num_frames_per_chunk, num_classes)-shaped np.ndarray # mask ~ (num_frames_per_chunk, num_classes)-shaped np.ndarray - start_frame = frames.closest_frame(chunk.start) + + aggregated_output[start_frame : start_frame + num_frames_per_chunk] += ( score * mask * hamming_window * warm_up_window ) @@ -652,6 +658,86 @@ def aggregate( return SlidingWindowFeature(average, frames) + @staticmethod + def aggregate_end_chunk( + scores: SlidingWindowFeature, + frames: SlidingWindow = None, + warm_up: Tuple[float, float] = (0.0, 0.0), + epsilon: float = 1e-12, + hamming: bool = False, + missing: float = np.NaN, + skip_average: bool = False, + ) -> SlidingWindowFeature: + """Aggregation + + Parameters + ---------- + scores : SlidingWindowFeature + Raw (unaggregated) scores. Shape is (num_chunks, num_frames_per_chunk, num_classes). + frames : SlidingWindow, optional + Frames resolution. Defaults to estimate it automatically based on `scores` shape + and chunk size. Providing the exact frame resolution (when known) leads to better + temporal precision. + warm_up : (float, float) tuple, optional + Left/right warm up duration (in seconds). + missing : float, optional + Value used to replace missing (ie all NaNs) values. + skip_average : bool, optional + Skip final averaging step. + + Returns + ------- + aggregated_scores : SlidingWindowFeature + Aggregated scores. 
Shape is (num_frames, num_classes) + """ + + num_chunks, num_frames_per_chunk, num_classes = scores.data.shape + + chunks = scores.sliding_window + if frames is None: + duration = step = chunks.duration / num_frames_per_chunk + frames = SlidingWindow(start=chunks.start, duration=duration, step=step) + else: + frames = SlidingWindow( + start=chunks.start, + duration=frames.duration, + step=frames.step, + ) + masks = 1 - np.isnan(scores) + scores.data = np.nan_to_num(scores.data, copy=True, nan=0.0) + + # aggregated_output[i] will be used to store the sum of all predictions + # for frame #i + num_frames = ( + frames.closest_frame( + scores.sliding_window.start + + scores.sliding_window.duration + + (num_chunks - 1) * scores.sliding_window.step + ) + + 1 + ) + step_frames = frames.closest_frame(scores.sliding_window.step) + aggregated_output: np.ndarray = np.zeros( + (num_frames, num_classes), dtype=np.float32 + ) + aggregated_output[0 : num_frames_per_chunk-step_frames] = scores[0][:num_frames_per_chunk-step_frames] + end = scores.sliding_window.duration - scores.sliding_window.step + + # data = scores.data + # print(data.shape) + # data=data[1:] + # scores = scores[1:] + # loop on the scores of sliding chunks + for (chunk, score) in scores: + # chunk ~ Segment + # score ~ (num_frames_per_chunk, num_classes)-shaped np.ndarray + # mask ~ (num_frames_per_chunk, num_classes)-shaped np.ndarray + start_frame = frames.closest_frame(end) + aggregated_output[start_frame : start_frame + step_frames] = score[num_frames_per_chunk-step_frames:] + end = chunk.end + + return SlidingWindowFeature(aggregated_output, frames) + @staticmethod def trim( scores: SlidingWindowFeature, diff --git a/pyannote/audio/models/segmentation/MultilatencyPyanNet.py b/pyannote/audio/models/segmentation/MultilatencyPyanNet.py new file mode 100644 index 000000000..b6e3f2ffd --- /dev/null +++ b/pyannote/audio/models/segmentation/MultilatencyPyanNet.py @@ -0,0 +1,310 @@ +# MIT License +# +# Copyright (c) 2020 CNRS +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
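Before the model definition below, a rough sketch of how its multi-latency output is meant to be consumed together with the new `latency_index` argument of `Inference` from the previous hunk. The latency list, the tensor shapes and the assumption that the latency axis comes first (which is what makes `outputs[self.latency_index]` in `Inference.infer` return a single head's scores) are illustrative guesses, not guaranteed by the patch.

```python
import torch

latency_list = [0.0, 0.25, 0.5, 1.0]   # hypothetical task.latency_list
batch_size, num_frames, num_classes = 4, 293, 7

# stand-in for the stacked multi-latency prediction returned by forward(),
# one head per latency in latency_list
outputs = torch.stack(
    [torch.rand(batch_size, num_frames, num_classes) for _ in latency_list]
)  # assumed shape: (num_latencies, batch_size, num_frames, num_classes)

# Inference(model, latency_index=2) would keep only the 0.5 s head:
scores = outputs[2]                     # (batch_size, num_frames, num_classes)
print(scores.shape)
```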
+ + +from typing import Optional +from dataclasses import dataclass +from pyannote.audio.utils.multi_task import map_with_specifications +from pyannote.audio.core.task import ( + Problem, + Resolution, + Specifications, + Task, + UnknownSpecificationsError, +) +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange +from pyannote.core.utils.generators import pairwise +from functools import cached_property +from typing import Any, Dict, List, Optional, Text, Tuple, Union + +from pyannote.audio.core.model import Model +from pyannote.core import SlidingWindow + +from pyannote.audio.core.task import Task +from pyannote.audio.models.blocks.sincnet import SincNet +from pyannote.audio.utils.params import merge_dict + +@dataclass +class Output: + num_frames: int + dimension: int + frames: SlidingWindow + +class MultilatencyPyanNet(Model): + """PyanNet segmentation model + + SincNet > LSTM > Feed forward > Classifier + + Parameters + ---------- + sample_rate : int, optional + Audio sample rate. Defaults to 16kHz (16000). + num_channels : int, optional + Number of channels. Defaults to mono (1). + sincnet : dict, optional + Keyword arugments passed to the SincNet block. + Defaults to {"stride": 1}. + lstm : dict, optional + Keyword arguments passed to the LSTM layer. + Defaults to {"hidden_size": 128, "num_layers": 2, "bidirectional": True}, + i.e. two bidirectional layers with 128 units each. + Set "monolithic" to False to split monolithic multi-layer LSTM into multiple mono-layer LSTMs. + This may proove useful for probing LSTM internals. + linear : dict, optional + Keyword arugments used to initialize linear layers + Defaults to {"hidden_size": 128, "num_layers": 2}, + i.e. two linear layers with 128 units each. 
+ """ + + SINCNET_DEFAULTS = {"stride": 10} + LSTM_DEFAULTS = { + "hidden_size": 128, + "num_layers": 2, + "bidirectional": True, + "monolithic": True, + "dropout": 0.0, + } + LINEAR_DEFAULTS = {"hidden_size": 128, "num_layers": 2} + + def __init__( + self, + sincnet: dict = None, + lstm: dict = None, + linear: dict = None, + sample_rate: int = 16000, + num_channels: int = 1, + task: Optional[Task] = None, + ): + super().__init__(sample_rate=sample_rate, num_channels=num_channels, task=task) + + sincnet = merge_dict(self.SINCNET_DEFAULTS, sincnet) + sincnet["sample_rate"] = sample_rate + lstm = merge_dict(self.LSTM_DEFAULTS, lstm) + lstm["batch_first"] = True + linear = merge_dict(self.LINEAR_DEFAULTS, linear) + self.save_hyperparameters("sincnet", "lstm", "linear") + + self.sincnet = SincNet(**self.hparams.sincnet) + + monolithic = lstm["monolithic"] + if monolithic: + multi_layer_lstm = dict(lstm) + del multi_layer_lstm["monolithic"] + self.lstm = nn.ModuleList([nn.LSTM(60, **multi_layer_lstm) for i in range(len(self.task.latency_list))]) + + else: + num_layers = lstm["num_layers"] + if num_layers > 1: + self.dropout = nn.Dropout(p=lstm["dropout"]) + + one_layer_lstm = dict(lstm) + one_layer_lstm["num_layers"] = 1 + one_layer_lstm["dropout"] = 0.0 + del one_layer_lstm["monolithic"] + + self.lstm = nn.ModuleList( + [ + nn.LSTM( + 60 + if i == 0 + else lstm["hidden_size"] * (2 if lstm["bidirectional"] else 1), + **one_layer_lstm + ) + for i in range(num_layers) + ] + ) + + if linear["num_layers"] < 1: + return + + lstm_out_features: int = self.hparams.lstm["hidden_size"] * ( + 2 if self.hparams.lstm["bidirectional"] else 1 + ) + self.linear = nn.ModuleList([nn.ModuleList( + [ + nn.Linear(in_features, out_features) + for in_features, out_features in pairwise( + [ + lstm_out_features, + ] + + [self.hparams.linear["hidden_size"]] + * self.hparams.linear["num_layers"] + ) + ] + ) for i in range(len(self.task.latency_list))]) + + def build(self): + if self.hparams.linear["num_layers"] > 0: + in_features = self.hparams.linear["hidden_size"] + else: + in_features = self.hparams.lstm["hidden_size"] * ( + 2 if self.hparams.lstm["bidirectional"] else 1 + ) + + if isinstance(self.specifications, tuple): + raise ValueError("PyanNet does not support multi-tasking.") + + if self.specifications.powerset: + out_features = self.specifications.num_powerset_classes + else: + out_features = len(self.specifications.classes) + + self.classifier = nn.ModuleList([nn.Linear(in_features, out_features) for i in range(len(self.task.latency_list))]) + self.activation = self.default_activation() + + def forward(self, waveforms: torch.Tensor) -> torch.Tensor: + """Pass forward + + Parameters + ---------- + waveforms : (batch, channel, sample) + + Returns + ------- + scores : (batch, frame, classes) + """ + sincnet_output = self.sincnet(waveforms) + predictions = [] + for k in range(len(self.task.latency_list)): + if self.hparams.lstm["monolithic"]: + outputs, _ = self.lstm[k]( + rearrange(sincnet_output, "batch feature frame -> batch frame feature") + ) + else: + outputs = rearrange(sincnet_output, "batch feature frame -> batch frame feature") + for i, lstm in enumerate(self.lstm): + outputs, _ = lstm(outputs) + if i + 1 < self.hparams.lstm["num_layers"]: + outputs = self.dropout(outputs) + + if self.hparams.linear["num_layers"] > 0: + for linear in self.linear[k]: + outputs = F.leaky_relu(linear(outputs)) + + predictions.append(self.activation(self.classifier[k](outputs))) + predictions = torch.stack(predictions, 
dim=0) + + return predictions + + + + + def __example_input_array(self, duration: Optional[float] = None) -> torch.Tensor: + duration = duration or next(iter(self.specifications)).duration + return torch.randn( + ( + 1, + self.hparams.num_channels, + self.audio.get_num_samples(duration), + ), + device=self.device, + ) + + @property + def example_input_array(self) -> torch.Tensor: + return self.__example_input_array() + + + @cached_property + def example_output(self) -> Union[Output, Tuple[Output]]: + """Example output""" + example_input_array = self.__example_input_array() + with torch.inference_mode(): + example_output = self(example_input_array) + + def __example_output( + example_output: torch.Tensor, + specifications: Specifications = None, + ) -> Output: + if specifications.resolution == Resolution.FRAME: + _, _, num_frames, dimension = example_output.shape + frame_duration = specifications.duration / num_frames + frames = SlidingWindow(step=frame_duration, duration=frame_duration) + else: + _, dimension = example_output.shape + num_frames = None + frames = None + + return Output( + num_frames=num_frames, + dimension=dimension, + frames=frames, + ) + + return map_with_specifications( + self.specifications, __example_output, example_output + ) + + def setup(self, stage=None): + if stage == "fit": + self.task.setup_metadata() + + # list of layers before adding task-dependent layers + before = set((name, id(module)) for name, module in self.named_modules()) + + # add task-dependent layers (e.g. final classification layer) + # and re-use original weights when compatible + + original_state_dict = self.state_dict() + self.build() + + try: + missing_keys, unexpected_keys = self.load_state_dict( + original_state_dict, strict=False + ) + + except RuntimeError as e: + if "size mismatch" in str(e): + msg = ( + "Model has been trained for a different task. For fine tuning or transfer learning, " + "it is recommended to train task-dependent layers for a few epochs " + f"before training the whole model: {self.task_dependent}." + ) + warnings.warn(msg) + else: + raise e + + # move layers that were added by build() to same device as the rest of the model + for name, module in self.named_modules(): + if (name, id(module)) not in before: + module.to(self.device) + + # add (trainable) loss function (e.g. 
ArcFace has its own set of trainable weights) + if stage == "fit": + # let task know about the model + self.task.model = self + # setup custom loss function + self.task.setup_loss_func() + # setup custom validation metrics + self.task.setup_validation_metric() + + # cache for later (and to avoid later CUDA error with multiprocessing) + _ = self.example_output + + # list of layers after adding task-dependent layers + after = set((name, id(module)) for name, module in self.named_modules()) + + # list of task-dependent layers + self.task_dependent = list(name for name, _ in after - before) \ No newline at end of file diff --git a/pyannote/audio/models/segmentation/__init__.py b/pyannote/audio/models/segmentation/__init__.py index 9f6f5f6e3..10104e3b9 100644 --- a/pyannote/audio/models/segmentation/__init__.py +++ b/pyannote/audio/models/segmentation/__init__.py @@ -22,5 +22,9 @@ from .PyanNet import PyanNet from .SSeRiouSS import SSeRiouSS +from .GuidedPyanNet import GuidedPyanNet +from .MultilatencyPyanNet import MultilatencyPyanNet -__all__ = ["PyanNet", "SSeRiouSS"] + + +__all__ = ["PyanNet", "SSeRiouSS", "GuidedPyanNet", "MultilatencyPyanNet"] diff --git a/pyannote/audio/tasks/__init__.py b/pyannote/audio/tasks/__init__.py index c8483711a..3542e3fd1 100644 --- a/pyannote/audio/tasks/__init__.py +++ b/pyannote/audio/tasks/__init__.py @@ -23,6 +23,8 @@ from .segmentation.multilabel import MultiLabelSegmentation # isort:skip from .segmentation.speaker_diarization import SpeakerDiarization # isort:skip from .segmentation.streaming_speaker_diarization import StreamingSpeakerDiarization # isort:skip +from .segmentation.multilatency_streaming_speaker_diarization import MultilatencyStreamingSpeakerDiarization # isort:skip + from .segmentation.voice_activity_detection import VoiceActivityDetection # isort:skip from .segmentation.overlapped_speech_detection import ( # isort:skip OverlappedSpeechDetection, @@ -43,4 +45,5 @@ "MultiLabelSegmentation", "SpeakerEmbedding", "Segmentation", + "MultilatencyStreamingSpeakerDiarization", ] diff --git a/pyannote/audio/tasks/segmentation/multilatency_streaming_speaker_diarization.py b/pyannote/audio/tasks/segmentation/multilatency_streaming_speaker_diarization.py new file mode 100644 index 000000000..208931417 --- /dev/null +++ b/pyannote/audio/tasks/segmentation/multilatency_streaming_speaker_diarization.py @@ -0,0 +1,921 @@ +# MIT License +# +# Copyright (c) 2020- CNRS +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
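# --- Minimal training sketch (editor's illustration) wiring the multilatency
# task and model introduced by this patch. Protocol name, latency values and
# trainer settings are example placeholders, not values taken from the patch.
import pytorch_lightning as pl
from pyannote.database import FileFinder, get_protocol

from pyannote.audio.models.segmentation import MultilatencyPyanNet
from pyannote.audio.tasks import MultilatencyStreamingSpeakerDiarization

protocol = get_protocol(
    "AMI.SpeakerDiarization.only_words",      # any SpeakerDiarizationProtocol
    preprocessors={"audio": FileFinder()},
)

task = MultilatencyStreamingSpeakerDiarization(
    protocol,
    duration=5.0,
    max_speakers_per_chunk=3,
    max_speakers_per_frame=2,
    latency_list=[0.0, 0.5, 1.0],             # one output head per latency
)

model = MultilatencyPyanNet(task=task)        # builds len(latency_list) LSTM + classifier heads
pl.Trainer(max_epochs=1).fit(model)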
+import sys +import math +import warnings +from collections import Counter +from typing import Dict, Literal, Sequence, Text, Tuple, Union, List + +import numpy as np +import torch +import torch.nn.functional +from matplotlib import pyplot as plt +from pyannote.core import Segment, SlidingWindowFeature +from pyannote.database.protocol import SpeakerDiarizationProtocol +from pyannote.database.protocol.protocol import Scope, Subset +from pytorch_lightning.loggers import MLFlowLogger, TensorBoardLogger +from rich.progress import track +from torch_audiomentations.core.transforms_interface import BaseWaveformTransform +from torchmetrics import Metric + +from pyannote.audio.core.task import Problem, Resolution, Specifications, Task +from pyannote.audio.tasks.segmentation.mixins import SegmentationTaskMixin +from pyannote.audio.torchmetrics import ( + DiarizationErrorRate, + FalseAlarmRate, + MissedDetectionRate, + OptimalDiarizationErrorRate, + OptimalDiarizationErrorRateThreshold, + OptimalFalseAlarmRate, + OptimalMissedDetectionRate, + OptimalSpeakerConfusionRate, + SpeakerConfusionRate, +) +from pyannote.audio.utils.loss import binary_cross_entropy, mse_loss, nll_loss +from pyannote.audio.utils.permutation import permutate +from pyannote.audio.utils.powerset import Powerset + +Subsets = list(Subset.__args__) +Scopes = list(Scope.__args__) + + +class MultilatencyStreamingSpeakerDiarization(SegmentationTaskMixin, Task): + """Speaker diarization + + Parameters + ---------- + protocol : SpeakerDiarizationProtocol + pyannote.database protocol + duration : float, optional + Chunks duration. Defaults to 2s. + max_speakers_per_chunk : int, optional + Maximum number of speakers per chunk (must be at least 2). + Defaults to estimating it from the training set. + max_speakers_per_frame : int, optional + Maximum number of (overlapping) speakers per frame. + Setting this value to 1 or more enables `powerset multi-class` training. + Default behavior is to use `multi-label` training. + weigh_by_cardinality: bool, optional + Weigh each powerset classes by the size of the corresponding speaker set. + In other words, {0, 1} powerset class weight is 2x bigger than that of {0} + or {1} powerset classes. Note that empty (non-speech) powerset class is + assigned the same weight as mono-speaker classes. Defaults to False (i.e. use + same weight for every class). Has no effect with `multi-label` training. + warm_up : float or (float, float), optional + Use that many seconds on the left- and rightmost parts of each chunk + to warm up the model. While the model does process those left- and right-most + parts, only the remaining central part of each chunk is used for computing the + loss during training, and for aggregating scores during inference. + Defaults to 0. (i.e. no warm-up). + balance: Sequence[Text], optional + When provided, training samples are sampled uniformly with respect to these keys. + For instance, setting `balance` to ["database","subset"] will make sure that each + database & subset combination will be equally represented in the training samples. + weight: str, optional + When provided, use this key as frame-wise weight in loss function. + batch_size : int, optional + Number of training samples per batch. Defaults to 32. + num_workers : int, optional + Number of workers used for generating training samples. + Defaults to multiprocessing.cpu_count() // 2. + pin_memory : bool, optional + If True, data loaders will copy tensors into CUDA pinned + memory before returning them. 
See pytorch documentation + for more details. Defaults to False. + augmentation : BaseWaveformTransform, optional + torch_audiomentations waveform transform, used by dataloader + during training. + vad_loss : {"bce", "mse"}, optional + Add voice activity detection loss. + Cannot be used in conjunction with `max_speakers_per_frame`. + metric : optional + Validation metric(s). Can be anything supported by torchmetrics.MetricCollection. + Defaults to AUROC (area under the ROC curve). + + References + ---------- + Hervé Bredin and Antoine Laurent + "End-To-End Speaker Segmentation for Overlap-Aware Resegmentation." + Proc. Interspeech 2021 + + Zhihao Du, Shiliang Zhang, Siqi Zheng, and Zhijie Yan + "Speaker Embedding-aware Neural Diarization: an Efficient Framework for Overlapping + Speech Diarization in Meeting Scenarios" + https://arxiv.org/abs/2203.09767 + + """ + + def __init__( + self, + protocol: SpeakerDiarizationProtocol, + duration: float = 2.0, + max_speakers_per_chunk: int = None, + max_speakers_per_frame: int = None, + weigh_by_cardinality: bool = False, + warm_up: Union[float, Tuple[float, float]] = 0.0, + balance: Sequence[Text] = None, + weight: Text = None, + batch_size: int = 32, + num_workers: int = None, + pin_memory: bool = False, + augmentation: BaseWaveformTransform = None, + vad_loss: Literal["bce", "mse"] = None, + metric: Union[Metric, Sequence[Metric], Dict[str, Metric]] = None, + max_num_speakers: int = None, # deprecated in favor of `max_speakers_per_chunk`` + loss: Literal["bce", "mse"] = None, # deprecated + latency: float = 0.0, + latency_list: List[float] = [0.0], + + ): + super().__init__( + protocol, + duration=duration, + warm_up=warm_up, + batch_size=batch_size, + num_workers=num_workers, + pin_memory=pin_memory, + augmentation=augmentation, + metric=metric, + ) + + if not isinstance(protocol, SpeakerDiarizationProtocol): + raise ValueError( + "SpeakerDiarization task requires a SpeakerDiarizationProtocol." + ) + + # deprecation warnings + if max_speakers_per_chunk is None and max_num_speakers is not None: + max_speakers_per_chunk = max_num_speakers + warnings.warn( + "`max_num_speakers` has been deprecated in favor of `max_speakers_per_chunk`." + ) + if loss is not None: + warnings.warn("`loss` has been deprecated and has no effect.") + + # parameter validation + if max_speakers_per_frame is not None: + if max_speakers_per_frame < 1: + raise ValueError( + f"`max_speakers_per_frame` must be 1 or more (you used {max_speakers_per_frame})." 
+ ) + if vad_loss is not None: + raise ValueError( + "`vad_loss` cannot be used jointly with `max_speakers_per_frame`" + ) + + self.max_speakers_per_chunk = max_speakers_per_chunk + self.max_speakers_per_frame = max_speakers_per_frame + self.weigh_by_cardinality = weigh_by_cardinality + self.balance = balance + self.weight = weight + self.vad_loss = vad_loss + self.latency=latency + self.latency_list=latency_list + + + def setup(self): + super().setup() + + # estimate maximum number of speakers per chunk when not provided + if self.max_speakers_per_chunk is None: + training = self.metadata["subset"] == Subsets.index("train") + + num_unique_speakers = [] + progress_description = f"Estimating maximum number of speakers per {self.duration:g}s chunk in the training set" + for file_id in track( + np.where(training)[0], description=progress_description + ): + annotations = self.annotations[ + np.where(self.annotations["file_id"] == file_id)[0] + ] + annotated_regions = self.annotated_regions[ + np.where(self.annotated_regions["file_id"] == file_id)[0] + ] + for region in annotated_regions: + # find annotations within current region + region_start = region["start"] + region_end = region["end"] + region_annotations = annotations[ + np.where( + (annotations["start"] >= region_start) + * (annotations["end"] <= region_end) + )[0] + ] + + for window_start in np.arange( + region_start, region_end - self.duration, 0.25 * self.duration + ): + window_end = window_start + self.duration + window_annotations = region_annotations[ + np.where( + (region_annotations["start"] <= window_end) + * (region_annotations["end"] >= window_start) + )[0] + ] + num_unique_speakers.append( + len(np.unique(window_annotations["file_label_idx"])) + ) + + # because there might a few outliers, estimate the upper bound for the + # number of speakers as the 97th percentile + + num_speakers, counts = zip(*list(Counter(num_unique_speakers).items())) + num_speakers, counts = np.array(num_speakers), np.array(counts) + + sorting_indices = np.argsort(num_speakers) + num_speakers = num_speakers[sorting_indices] + counts = counts[sorting_indices] + + ratios = np.cumsum(counts) / np.sum(counts) + + for k, ratio in zip(num_speakers, ratios): + if k == 0: + print(f" - {ratio:7.2%} of all chunks contain no speech at all.") + elif k == 1: + print(f" - {ratio:7.2%} contain 1 speaker or less") + else: + print(f" - {ratio:7.2%} contain {k} speakers or less") + + self.max_speakers_per_chunk = max( + 2, + num_speakers[np.where(ratios > 0.97)[0][0]], + ) + + print( + f"Setting `max_speakers_per_chunk` to {self.max_speakers_per_chunk}. " + f"You can override this value (or avoid this estimation step) by passing `max_speakers_per_chunk={self.max_speakers_per_chunk}` to the task constructor." 
+ ) + + if ( + self.max_speakers_per_frame is not None + and self.max_speakers_per_frame > self.max_speakers_per_chunk + ): + raise ValueError( + f"`max_speakers_per_frame` ({self.max_speakers_per_frame}) must be smaller " + f"than `max_speakers_per_chunk` ({self.max_speakers_per_chunk})" + ) + + # now that we know about the number of speakers upper bound + # we can set task specifications + self.specifications = Specifications( + problem=Problem.MULTI_LABEL_CLASSIFICATION + if self.max_speakers_per_frame is None + else Problem.MONO_LABEL_CLASSIFICATION, + resolution=Resolution.FRAME, + duration=self.duration, + min_duration=self.min_duration, + warm_up=self.warm_up, + classes=[f"speaker#{i+1}" for i in range(self.max_speakers_per_chunk)], + powerset_max_classes=self.max_speakers_per_frame, + permutation_invariant=True, + ) + + def setup_loss_func(self): + if self.specifications.powerset: + self.model.powerset = Powerset( + len(self.specifications.classes), + self.specifications.powerset_max_classes, + ) + + def prepare_chunk(self, file_id: int, start_time: float, duration: float): + """Prepare chunk + + Parameters + ---------- + file_id : int + File index + start_time : float + Chunk start time + duration : float + Chunk duration. + + Returns + ------- + sample : dict + Dictionary containing the chunk data with the following keys: + - `X`: waveform + - `y`: target as a SlidingWindowFeature instance where y.labels is + in meta.scope space. + - `meta`: + - `scope`: target scope (0: file, 1: database, 2: global) + - `database`: database index + - `file`: file index + """ + + file = self.get_file(file_id) + + # get label scope + label_scope = Scopes[self.metadata[file_id]["scope"]] + label_scope_key = f"{label_scope}_label_idx" + + # + chunk = Segment(start_time, start_time + duration) + + sample = dict() + sample["X"], _ = self.model.audio.crop(file, chunk, duration=duration) + + # gather all annotations of current file + annotations = self.annotations[self.annotations["file_id"] == file_id] + + # gather all annotations with non-empty intersection with current chunk + chunk_annotations = annotations[ + (annotations["start"] < chunk.end) & (annotations["end"] > chunk.start) + ] + + # discretize chunk annotations at model output resolution + start = np.maximum(chunk_annotations["start"], chunk.start) - chunk.start + start_idx = np.floor(start / self.model.example_output.frames.step).astype(int) + end = np.minimum(chunk_annotations["end"], chunk.end) - chunk.start + end_idx = np.ceil(end / self.model.example_output.frames.step).astype(int) + + # get list and number of labels for current scope + labels = list(np.unique(chunk_annotations[label_scope_key])) + num_labels = len(labels) + + if num_labels > self.max_speakers_per_chunk: + pass + + # initial frame-level targets + y = np.zeros((self.model.example_output.num_frames, num_labels), dtype=np.uint8) + + # map labels to indices + mapping = {label: idx for idx, label in enumerate(labels)} + + for start, end, label in zip( + start_idx, end_idx, chunk_annotations[label_scope_key] + ): + mapped_label = mapping[label] + y[start:end, mapped_label] = 1 + + sample["y"] = SlidingWindowFeature( + y, self.model.example_output.frames, labels=labels + ) + + metadata = self.metadata[file_id] + sample["meta"] = {key: metadata[key] for key in metadata.dtype.names} + sample["meta"]["file"] = file_id + + return sample + + def collate_y(self, batch) -> torch.Tensor: + """ + + Parameters + ---------- + batch : list + List of samples to collate. 
+ "y" field is expected to be a SlidingWindowFeature. + + Returns + ------- + y : torch.Tensor + Collated target tensor of shape (num_frames, self.max_speakers_per_chunk) + If one chunk has more than `self.max_speakers_per_chunk` speakers, we keep + the max_speakers_per_chunk most talkative ones. If it has less, we pad with + zeros (artificial inactive speakers). + """ + + collated_y = [] + for b in batch: + y = b["y"].data + num_speakers = len(b["y"].labels) + if num_speakers > self.max_speakers_per_chunk: + # sort speakers in descending talkativeness order + indices = np.argsort(-np.sum(y, axis=0), axis=0) + # keep only the most talkative speakers + y = y[:, indices[: self.max_speakers_per_chunk]] + + # TODO: we should also sort the speaker labels in the same way + + elif num_speakers < self.max_speakers_per_chunk: + # create inactive speakers by zero padding + y = np.pad( + y, + ((0, 0), (0, self.max_speakers_per_chunk - num_speakers)), + mode="constant", + ) + + else: + # we have exactly the right number of speakers + pass + + collated_y.append(y) + + return torch.from_numpy(np.stack(collated_y)) + + def segmentation_loss( + self, + permutated_prediction: torch.Tensor, + target: torch.Tensor, + weight: torch.Tensor = None, + ) -> torch.Tensor: + """Permutation-invariant segmentation loss + + Parameters + ---------- + permutated_prediction : (batch_size, num_frames, num_classes) torch.Tensor + Permutated speaker activity predictions. + target : (batch_size, num_frames, num_speakers) torch.Tensor + Speaker activity. + weight : (batch_size, num_frames, 1) torch.Tensor, optional + Frames weight. + + Returns + ------- + seg_loss : torch.Tensor + Permutation-invariant segmentation loss + """ + if self.specifications.powerset: + # `clamp_min` is needed to set non-speech weight to 1. + class_weight = ( + torch.clamp_min(self.model.powerset.cardinality, 1.0) + if self.weigh_by_cardinality + else None + ) + + seg_loss = nll_loss( + permutated_prediction, + torch.argmax(target, dim=-1), + class_weight=class_weight, + weight=weight, + ) + else: + seg_loss = binary_cross_entropy( + permutated_prediction, target.float(), weight=weight + ) + + return seg_loss + + def voice_activity_detection_loss( + self, + permutated_prediction: torch.Tensor, + target: torch.Tensor, + weight: torch.Tensor = None, + ) -> torch.Tensor: + """Voice activity detection loss + + Parameters + ---------- + permutated_prediction : (batch_size, num_frames, num_classes) torch.Tensor + Speaker activity predictions. + target : (batch_size, num_frames, num_speakers) torch.Tensor + Speaker activity. + weight : (batch_size, num_frames, 1) torch.Tensor, optional + Frames weight. + + Returns + ------- + vad_loss : torch.Tensor + Voice activity detection loss. + """ + + vad_prediction, _ = torch.max(permutated_prediction, dim=2, keepdim=True) + # (batch_size, num_frames, 1) + + vad_target, _ = torch.max(target.float(), dim=2, keepdim=False) + # (batch_size, num_frames) + + if self.vad_loss == "bce": + loss = binary_cross_entropy(vad_prediction, vad_target, weight=weight) + + elif self.vad_loss == "mse": + loss = mse_loss(vad_prediction, vad_target, weight=weight) + + return loss + + def training_step(self, batch, batch_idx: int): + """Compute permutation-invariant segmentation loss + + Parameters + ---------- + batch : (usually) dict of torch.Tensor + Current batch. + batch_idx: int + Batch index. 
+ + Returns + ------- + loss : {str: torch.tensor} + {"loss": loss} + """ + + # target + target = batch["y"] + # (batch_size, num_frames, num_speakers) + + waveform = batch["X"] + # (batch_size, num_channels, num_samples) + + # drop samples that contain too many speakers + num_speakers: torch.Tensor = torch.sum(torch.any(target, dim=1), dim=1) + keep: torch.Tensor = num_speakers <= self.max_speakers_per_chunk + target = target[keep] + waveform = waveform[keep] + + # corner case + if not keep.any(): + return None + + # forward pass + predictions = self.model(waveform) + seg_loss = 0 + for k in range(len(self.latency_list)): + prediction = predictions[k] + batch_size, num_frames, _ = prediction.shape + # (batch_size, num_frames, num_classes) + + # frames weight + weight_key = getattr(self, "weight", None) + weight = batch.get( + weight_key, + torch.ones(batch_size, num_frames, 1, device=self.model.device), + ) + # (batch_size, num_frames, 1) + + # warm-up + warm_up_left = round(self.warm_up[0] / self.duration * num_frames) + weight[:, :warm_up_left] = 0.0 + warm_up_right = round(self.warm_up[1] / self.duration * num_frames) + weight[:, num_frames - warm_up_right :] = 0.0 + + delay = int(np.floor(num_frames * (self.latency_list[k]) / self.duration)) # round down + + prediction = prediction[:, delay:, :] + target = target[:, :num_frames-delay, :] + + #future + # prediction = prediction[:, :num_frames-delay, :] + # target = target[:, delay:, :] + + + + if self.specifications.powerset: + multilabel = self.model.powerset.to_multilabel(prediction) + permutated_target, _ = permutate(multilabel, target) + permutated_target_powerset = self.model.powerset.to_powerset( + permutated_target.float() + ) + seg_loss += self.segmentation_loss( + prediction, permutated_target_powerset, weight=weight + ) + + else: + permutated_prediction, _ = permutate(target, prediction) + seg_loss += self.segmentation_loss( + permutated_prediction, target, weight=weight + ) + + + self.model.log( + "loss/train/segmentation", + seg_loss, + on_step=False, + on_epoch=True, + prog_bar=False, + logger=True, + ) + + if self.vad_loss is None: + vad_loss = 0.0 + + else: + # TODO: vad_loss probably does not make sense in powerset mode + # because first class (empty set of labels) does exactly this... 
+ if self.specifications.powerset: + vad_loss = self.voice_activity_detection_loss( + prediction, permutated_target_powerset, weight=weight + ) + + else: + vad_loss = self.voice_activity_detection_loss( + permutated_prediction, target, weight=weight + ) + + self.model.log( + "loss/train/vad", + vad_loss, + on_step=False, + on_epoch=True, + prog_bar=False, + logger=True, + ) + + loss = seg_loss + vad_loss + + # skip batch if something went wrong for some reason + if torch.isnan(loss): + return None + + self.model.log( + "loss/train", + loss, + on_step=False, + on_epoch=True, + prog_bar=False, + logger=True, + ) + + return {"loss": loss} + + def default_metric( + self, + ) -> Union[Metric, Sequence[Metric], Dict[str, Metric]]: + """Returns diarization error rate and its components""" + + if self.specifications.powerset: + return { + "DiarizationErrorRate": DiarizationErrorRate(0.5), + "DiarizationErrorRate/Confusion": SpeakerConfusionRate(0.5), + "DiarizationErrorRate/Miss": MissedDetectionRate(0.5), + "DiarizationErrorRate/FalseAlarm": FalseAlarmRate(0.5), + } + + return { + "DiarizationErrorRate": OptimalDiarizationErrorRate(), + "DiarizationErrorRate/Threshold": OptimalDiarizationErrorRateThreshold(), + "DiarizationErrorRate/Confusion": OptimalSpeakerConfusionRate(), + "DiarizationErrorRate/Miss": OptimalMissedDetectionRate(), + "DiarizationErrorRate/FalseAlarm": OptimalFalseAlarmRate(), + } + + # TODO: no need to compute gradient in this method + def validation_step(self, batch, batch_idx: int): + """Compute validation loss and metric + + Parameters + ---------- + batch : dict of torch.Tensor + Current batch. + batch_idx: int + Batch index. + """ + + # target + target = batch["y"] + # (batch_size, num_frames, num_speakers) + + waveform = batch["X"] + # (batch_size, num_channels, num_samples) + + # TODO: should we handle validation samples with too many speakers + # waveform = waveform[keep] + # target = target[keep] + + # forward pass + predictions = self.model(waveform) + losses=[] + for k in range(len(self.latency_list)): + prediction = predictions[k] + batch_size, num_frames, _ = prediction.shape + + # frames weight + weight_key = getattr(self, "weight", None) + weight = batch.get( + weight_key, + torch.ones(batch_size, num_frames, 1, device=self.model.device), + ) + # (batch_size, num_frames, 1) + + # warm-up + warm_up_left = round(self.warm_up[0] / self.duration * num_frames) + weight[:, :warm_up_left] = 0.0 + warm_up_right = round(self.warm_up[1] / self.duration * num_frames) + weight[:, num_frames - warm_up_right :] = 0.0 + + delay = int(np.floor(num_frames * (self.latency_list[k]) / self.duration)) # round down + + prediction = prediction[:, delay:, :] + reference = target[:, :num_frames-delay, :] + + #future + # prediction = prediction[:, :num_frames-delay, :] + # target = target[:, delay:, :] + + if self.specifications.powerset: + multilabel = self.model.powerset.to_multilabel(prediction) + permutated_target, _ = permutate(multilabel, reference) + + # FIXME: handle case where target have too many speakers? 
+ # since we don't need + permutated_target_powerset = self.model.powerset.to_powerset( + permutated_target.float() + ) + losses.append(self.segmentation_loss( + prediction, permutated_target_powerset, weight=weight + )) + + else: + permutated_prediction, _ = permutate(reference, prediction) + losses.append(self.segmentation_loss( + permutated_prediction, reference, weight=weight + )) + + target = target[:, :num_frames-delay, :] + + + seg_loss = torch.sum(torch.tensor(losses)) + + self.model.log( + "loss/val/segmentation", + seg_loss, + on_step=False, + on_epoch=True, + prog_bar=False, + logger=True, + ) + + if self.vad_loss is None: + vad_loss = 0.0 + + else: + # TODO: vad_loss probably does not make sense in powerset mode + # because first class (empty set of labels) does exactly this... + if self.specifications.powerset: + vad_loss = self.voice_activity_detection_loss( + prediction, permutated_target_powerset, weight=weight + ) + + else: + vad_loss = self.voice_activity_detection_loss( + permutated_prediction, target, weight=weight + ) + + self.model.log( + "loss/val/vad", + vad_loss, + on_step=False, + on_epoch=True, + prog_bar=False, + logger=True, + ) + + loss = seg_loss + vad_loss + + self.model.log( + "loss/val", + loss, + on_step=False, + on_epoch=True, + prog_bar=False, + logger=True, + ) + + if self.specifications.powerset: + self.model.validation_metric( + torch.transpose( + multilabel[:, warm_up_left : num_frames - warm_up_right], 1, 2 + ), + torch.transpose( + target[:, warm_up_left : num_frames - warm_up_right], 1, 2 + ), + ) + else: + self.model.validation_metric( + torch.transpose( + prediction[:, warm_up_left : num_frames - warm_up_right], 1, 2 + ), + torch.transpose( + target[:, warm_up_left : num_frames - warm_up_right], 1, 2 + ), + ) + + self.model.log_dict( + self.model.validation_metric, + on_step=False, + on_epoch=True, + prog_bar=True, + logger=True, + ) + + # log first batch visualization every 2^n epochs. 
+ if ( + self.model.current_epoch == 0 + or math.log2(self.model.current_epoch) % 1 > 0 + or batch_idx > 0 + ): + return + + # visualize first 9 validation samples of first batch in Tensorboard/MLflow + + if self.specifications.powerset: + y = permutated_target.float().cpu().numpy() + y_pred = multilabel.cpu().numpy() + else: + y = target.float().cpu().numpy() + y_pred = permutated_prediction.cpu().numpy() + + # prepare 3 x 3 grid (or smaller if batch size is smaller) + num_samples = min(self.batch_size, 9) + nrows = math.ceil(math.sqrt(num_samples)) + ncols = math.ceil(num_samples / nrows) + fig, axes = plt.subplots( + nrows=2 * nrows, ncols=ncols, figsize=(8, 5), squeeze=False + ) + + # reshape target so that there is one line per class when plotting it + y[y == 0] = np.NaN + if len(y.shape) == 2: + y = y[:, :, np.newaxis] + y *= np.arange(y.shape[2]) + + # plot each sample + for sample_idx in range(num_samples): + # find where in the grid it should be plotted + row_idx = sample_idx // nrows + col_idx = sample_idx % ncols + + # plot target + ax_ref = axes[row_idx * 2 + 0, col_idx] + sample_y = y[sample_idx] + ax_ref.plot(sample_y) + ax_ref.set_xlim(0, len(sample_y)) + ax_ref.set_ylim(-1, sample_y.shape[1]) + ax_ref.get_xaxis().set_visible(False) + ax_ref.get_yaxis().set_visible(False) + + # plot predictions + ax_hyp = axes[row_idx * 2 + 1, col_idx] + sample_y_pred = y_pred[sample_idx] + ax_hyp.axvspan(0, warm_up_left, color="k", alpha=0.5, lw=0) + ax_hyp.axvspan( + num_frames - warm_up_right, num_frames, color="k", alpha=0.5, lw=0 + ) + ax_hyp.plot(sample_y_pred) + ax_hyp.set_ylim(-0.1, 1.1) + ax_hyp.set_xlim(0, len(sample_y)) + ax_hyp.get_xaxis().set_visible(False) + + plt.tight_layout() + + for logger in self.model.loggers: + if isinstance(logger, TensorBoardLogger): + logger.experiment.add_figure("samples", fig, self.model.current_epoch) + elif isinstance(logger, MLFlowLogger): + logger.experiment.log_figure( + run_id=logger.run_id, + figure=fig, + artifact_file=f"samples_epoch{self.model.current_epoch}.png", + ) + + plt.close(fig) + + +def main(protocol: str, subset: str = "test", model: str = "pyannote/segmentation"): + """Evaluate a segmentation model""" + + from pyannote.database import FileFinder, get_protocol + from rich.progress import Progress + + from pyannote.audio import Inference + from pyannote.audio.pipelines.utils import get_devices + from pyannote.audio.utils.metric import DiscreteDiarizationErrorRate + from pyannote.audio.utils.signal import binarize + + (device,) = get_devices(needs=1) + metric = DiscreteDiarizationErrorRate() + protocol = get_protocol(protocol, preprocessors={"audio": FileFinder()}) + files = list(getattr(protocol, subset)()) + + with Progress() as progress: + main_task = progress.add_task(protocol.name, total=len(files)) + file_task = progress.add_task("Processing", total=1.0) + + def progress_hook(completed: int = None, total: int = None): + progress.update(file_task, completed=completed / total) + + inference = Inference(model, device=device) + + for file in files: + progress.update(file_task, description=file["uri"]) + reference = file["annotation"] + hypothesis = binarize(inference(file, hook=progress_hook)) + uem = file["annotated"] + _ = metric(reference, hypothesis, uem=uem) + progress.advance(main_task) + + _ = metric.report(display=True) + + +if __name__ == "__main__": + import typer + + typer.run(main) From 9ac7294079cd4652b0221941dadee210423e6cc5 Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Thu, 8 Feb 2024 16:39:22 +0100 Subject: 
[PATCH 10/23] add latency_index parameter in inference.py to be able to use the Inference class with a multilatency model --- pyannote/audio/core/inference.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pyannote/audio/core/inference.py b/pyannote/audio/core/inference.py index 50da27a02..c7c3ced2d 100644 --- a/pyannote/audio/core/inference.py +++ b/pyannote/audio/core/inference.py @@ -119,6 +119,7 @@ def __init__( specifications = self.model.specifications # ~~~~ sliding window ~~~~~ + if window not in ["sliding", "whole"]: raise ValueError('`window` must be "sliding" or "whole".') From 9baefb5121e401db9a82205f5b0502e38d9dabfc Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Thu, 8 Feb 2024 16:49:56 +0100 Subject: [PATCH 11/23] Implement guided model and guided task --- .../models/segmentation/GuidedPyanNet.py | 234 +++++ .../segmentation/GuidedSpeakerDiarization | 992 ++++++++++++++++++ 2 files changed, 1226 insertions(+) create mode 100644 pyannote/audio/models/segmentation/GuidedPyanNet.py create mode 100644 pyannote/audio/tasks/segmentation/GuidedSpeakerDiarization diff --git a/pyannote/audio/models/segmentation/GuidedPyanNet.py b/pyannote/audio/models/segmentation/GuidedPyanNet.py new file mode 100644 index 000000000..3b84830e0 --- /dev/null +++ b/pyannote/audio/models/segmentation/GuidedPyanNet.py @@ -0,0 +1,234 @@ +# MIT License +# +# Copyright (c) 2020 CNRS +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + + +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange +from pyannote.core.utils.generators import pairwise + +from pyannote.audio.core.model import Model +from pyannote.audio.core.task import Task +from pyannote.audio.models.blocks.sincnet import SincNet +from pyannote.audio.utils.params import merge_dict + + +class GuidedPyanNet(Model): + """Guided PyanNet segmentation model + SincNet > + > LSTM > Feed forward > Classifier + Guide > + Parameters + ---------- + sample_rate : int, optional + Audio sample rate. Defaults to 16kHz (16000). + num_channels : int, optional + Number of channels. Defaults to mono (1). + sincnet : dict, optional + Keyword arugments passed to the SincNet block. + Defaults to {"stride": 1}. + lstm : dict, optional + Keyword arguments passed to the LSTM layer. + Defaults to {"hidden_size": 128, "num_layers": 2, "bidirectional": True}, + i.e. two bidirectional layers with 128 units each. 
+ Set "monolithic" to False to split monolithic multi-layer LSTM into multiple mono-layer LSTMs. + This may proove useful for probing LSTM internals. + linear : dict, optional + Keyword arugments used to initialize linear layers + Defaults to {"hidden_size": 128, "num_layers": 2}, + i.e. two linear layers with 128 units each. + """ + + SINCNET_DEFAULTS = {"stride": 10} + LSTM_DEFAULTS = { + "hidden_size": 128, + "num_layers": 2, + "bidirectional": True, + "monolithic": True, + "dropout": 0.0, + } + LINEAR_DEFAULTS = {"hidden_size": 128, "num_layers": 2} + + def __init__( + self, + sincnet: dict = None, + lstm: dict = None, + linear: dict = None, + sample_rate: int = 16000, + num_channels: int = 1, + task: Optional[Task] = None, + ): + + super().__init__(sample_rate=sample_rate, num_channels=num_channels, task=task) + + sincnet = merge_dict(self.SINCNET_DEFAULTS, sincnet) + sincnet["sample_rate"] = sample_rate + lstm = merge_dict(self.LSTM_DEFAULTS, lstm) + lstm["batch_first"] = True + linear = merge_dict(self.LINEAR_DEFAULTS, linear) + self.save_hyperparameters("sincnet", "lstm", "linear") + + self.sincnet = SincNet(**self.hparams.sincnet) + + # monolithic = lstm["monolithic"] + # if monolithic: + # multi_layer_lstm = dict(lstm) + # del multi_layer_lstm["monolithic"] + # self.lstm = nn.LSTM(60 + len(self.specifications.classes), **multi_layer_lstm) + + # else: + # num_layers = lstm["num_layers"] + # if num_layers > 1: + # self.dropout = nn.Dropout(p=lstm["dropout"]) + + # one_layer_lstm = dict(lstm) + # one_layer_lstm["num_layers"] = 1 + # one_layer_lstm["dropout"] = 0.0 + # del one_layer_lstm["monolithic"] + + # self.lstm = nn.ModuleList( + # [ + # nn.LSTM( + # 60 + len(self.specifications.classes) + # if i == 0 + # else lstm["hidden_size"] * (2 if lstm["bidirectional"] else 1), + # **one_layer_lstm + # ) + # for i in range(num_layers) + # ] + # ) + + if linear["num_layers"] < 1: + return + + lstm_out_features: int = self.hparams.lstm["hidden_size"] * ( + 2 if self.hparams.lstm["bidirectional"] else 1 + ) + self.linear = nn.ModuleList( + [ + nn.Linear(in_features, out_features) + for in_features, out_features in pairwise( + [ + lstm_out_features, + ] + + [self.hparams.linear["hidden_size"]] + * self.hparams.linear["num_layers"] + ) + ] + ) + + def build(self): + + lstm = dict(self.hparams.lstm) + + if lstm["monolithic"]: + multi_layer_lstm = dict(lstm) + del multi_layer_lstm["monolithic"] + self.lstm = nn.LSTM( + 60 + self.specifications.num_powerset_classes, **multi_layer_lstm + ) + + else: + num_layers = lstm["num_layers"] + if num_layers > 1: + self.dropout = nn.Dropout(p=lstm["dropout"]) + + one_layer_lstm = dict(lstm) + one_layer_lstm["num_layers"] = 1 + one_layer_lstm["dropout"] = 0.0 + del one_layer_lstm["monolithic"] + + self.lstm = nn.ModuleList( + [ + nn.LSTM( + 60 + self.specifications.num_powerset_classes + if i == 0 + else lstm["hidden_size"] * (2 if lstm["bidirectional"] else 1), + **one_layer_lstm + ) + for i in range(num_layers) + ] + ) + + if self.hparams.linear["num_layers"] > 0: + in_features = self.hparams.linear["hidden_size"] + else: + in_features = self.hparams.lstm["hidden_size"] * ( + 2 if self.hparams.lstm["bidirectional"] else 1 + ) + + if self.specifications.powerset: + out_features = self.specifications.num_powerset_classes + else: + out_features = len(self.specifications.classes) + + self.classifier = nn.Linear(in_features, out_features) + self.activation = self.default_activation() + + def forward( + self, waveforms: torch.Tensor, guide: 
Optional[torch.Tensor] = None + ) -> torch.Tensor: + """Pass forward + Parameters + ---------- + waveforms : (batch, channel, sample) + guide : (batch, frame, classes), optional + Returns + ------- + scores : (batch, frame, classes) + """ + + outputs = self.sincnet(waveforms) + batch_size, num_features, num_frames = outputs.shape + + # TODO: add support for powerset encoding in guide + num_speakers = len(self.specifications.classes) + num_speakers_powerset = self.specifications.num_powerset_classes + if guide is None: + guide = torch.log(torch.full((batch_size,num_frames,num_speakers_powerset), fill_value=1/num_speakers_powerset, device=outputs.device, dtype=outputs.dtype)) + else: + _batch_size, _num_frames, _num_speakers = guide.shape + assert _batch_size == batch_size + assert _num_frames == num_frames + assert _num_speakers == num_speakers_powerset + + guide = rearrange(guide, "batch frame speakers -> batch speakers frame") + outputs = torch.cat([outputs, guide], dim=1) + if self.hparams.lstm["monolithic"]: + outputs, _ = self.lstm( + rearrange(outputs, "batch feature frame -> batch frame feature") + ) + else: + outputs = rearrange(outputs, "batch feature frame -> batch frame feature") + for i, lstm in enumerate(self.lstm): + outputs, _ = lstm(outputs) + if i + 1 < self.hparams.lstm["num_layers"]: + outputs = self.dropout(outputs) + + if self.hparams.linear["num_layers"] > 0: + for linear in self.linear: + outputs = F.leaky_relu(linear(outputs)) + + return self.activation(self.classifier(outputs)) \ No newline at end of file diff --git a/pyannote/audio/tasks/segmentation/GuidedSpeakerDiarization b/pyannote/audio/tasks/segmentation/GuidedSpeakerDiarization new file mode 100644 index 000000000..e2c4c1771 --- /dev/null +++ b/pyannote/audio/tasks/segmentation/GuidedSpeakerDiarization @@ -0,0 +1,992 @@ +# MIT License +# +# Copyright (c) 2020- CNRS +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
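# --- Self-contained illustration (example sizes only) of the guide handling in
# GuidedPyanNet.forward above: SincNet features are concatenated with a
# frame-aligned powerset guide, falling back to a uniform log prior when no
# guide is provided.
import torch
from einops import rearrange

batch, num_frames, num_powerset_classes = 4, 293, 7   # example shapes
sincnet_out = torch.randn(batch, 60, num_frames)      # (batch, feature, frame)

# no guide given -> uniform log-probability prior over powerset classes
guide = torch.log(
    torch.full((batch, num_frames, num_powerset_classes), 1.0 / num_powerset_classes)
)
guide = rearrange(guide, "batch frame speakers -> batch speakers frame")

lstm_in = torch.cat([sincnet_out, guide], dim=1)       # (batch, 60 + classes, frame)
lstm_in = rearrange(lstm_in, "batch feature frame -> batch frame feature")
print(lstm_in.shape)                                   # torch.Size([4, 293, 67])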
+ +import math +import random +import itertools +from typing import Dict, Optional, Sequence, Text, Union +from torch.utils.data._utils.collate import default_collate + +import numpy as np +import torch +import torch.nn.functional +from collections import defaultdict +from matplotlib import pyplot as plt +from pyannote.core import Segment, SlidingWindowFeature + +from pyannote.database.protocol import SegmentationProtocol, SpeakerDiarizationProtocol +from pyannote.database.protocol.protocol import Scope, Subset +from pytorch_lightning.loggers import MLFlowLogger, TensorBoardLogger +from torch_audiomentations import OneOf +from torch_audiomentations.core.transforms_interface import BaseWaveformTransform +from torch_audiomentations.utils.object_dict import ObjectDict +from torchmetrics import Metric + +from pyannote.audio.core.task import Problem, Resolution, Specifications, Task +from pyannote.audio.tasks.segmentation.mixins import SegmentationTaskMixin +from pyannote.audio.torchmetrics import ( + DiarizationErrorRate, + FalseAlarmRate, + MissedDetectionRate, + OptimalDiarizationErrorRate, + OptimalDiarizationErrorRateThreshold, + OptimalFalseAlarmRate, + OptimalMissedDetectionRate, + OptimalSpeakerConfusionRate, + SpeakerConfusionRate, +) +from pyannote.audio.utils.loss import nll_loss +from pyannote.audio.utils.permutation import permutate +from pyannote.audio.utils.powerset import Powerset + +Subsets = list(Subset.__args__) +Scopes = list(Scope.__args__) + + +class GuidedSpeakerDiarization(SegmentationTaskMixin, Task): + """Guided speaker diarization + Parameters + ---------- + protocol : SpeakerDiarizationProtocol + pyannote.database protocol + duration : float, optional + Chunks duration. Defaults to 10s. + max_speakers_per_chunk : int, optional + Maximum number of speakers per chunk. Defaults to 3. + max_speakers_per_frame : int, optional + Maximum number of (overlapping) speakers per frame. Defaults to 2. + balance: str, optional + When provided, training samples are sampled uniformly with respect to that key. + For instance, setting `balance` to "database" will make sure that each database + will be equally represented in the training samples. + freedom : float, optional + Controls how much freedom the model is allowed regarding the provided guide. + 0.0 means that the model is forced to follow the guide exactly. + 1.0 means that the model is free to ignore the guide completely. + Defaults to 0.5. + batch_size : int, optional + Number of training samples per batch. Defaults to 32. + num_workers : int, optional + Number of workers used for generating training samples. + Defaults to multiprocessing.cpu_count() // 2. + pin_memory : bool, optional + If True, data loaders will copy tensors into CUDA pinned + memory before returning them. See pytorch documentation + for more details. Defaults to False. + augmentation : BaseWaveformTransform, optional + torch_audiomentations waveform transform, used by dataloader + during training. + metric : optional + Validation metric(s). Can be anything supported by torchmetrics.MetricCollection. + Defaults to AUROC (area under the ROC curve). 
+ """ + + def __init__( + self, + protocol: SpeakerDiarizationProtocol, + duration: float = 10.0, + max_speakers_per_chunk: int = 3, + max_speakers_per_frame: int = 2, + balance: Text = None, + freedom: float = 0.5, + batch_size: int = 32, + num_workers: int = None, + pin_memory: bool = False, + augmentation: BaseWaveformTransform = None, + metric: Union[Metric, Sequence[Metric], Dict[str, Metric]] = None, + step: float = None, + ): + super().__init__( + protocol, + duration=duration, + batch_size=batch_size, + num_workers=num_workers, + pin_memory=pin_memory, + augmentation=augmentation, + metric=metric, + ) + + if not isinstance(protocol, SpeakerDiarizationProtocol): + raise ValueError( + "SpeakerDiarization task requires a SpeakerDiarizationProtocol." + ) + + self.max_speakers_per_chunk = max_speakers_per_chunk + self.max_speakers_per_frame = max_speakers_per_frame + self.balance = balance + self.freedom = freedom + if step == None: + self.step = self.duration / 10 + else: + self.step = step + + self.specifications = Specifications( + problem=Problem.MONO_LABEL_CLASSIFICATION, + resolution=Resolution.FRAME, + duration=self.duration, + classes=[f"speaker#{i+1}" for i in range(self.max_speakers_per_chunk)], + powerset_max_classes=self.max_speakers_per_frame, + permutation_invariant=True, + ) + + def setup_loss_func(self): + self.model.powerset = Powerset( + len(self.specifications.classes), + self.specifications.powerset_max_classes, + ) + + def setup(self): + """Setup""" + + # duration of training chunks + # TODO: handle variable duration case + duration = getattr(self, "duration", 0.0) + + # list of possible values for each metadata key + metadata_unique_values = defaultdict(list) + + metadata_unique_values["subset"] = Subsets + + if isinstance(self.protocol, SpeakerDiarizationProtocol): + metadata_unique_values["scope"] = Scopes + + elif isinstance(self.protocol, SegmentationProtocol): + classes = getattr(self, "classes", list()) + + # make sure classes attribute exists (and set to None if it did not exist) + self.classes = getattr(self, "classes", None) + if self.classes is None: + classes = list() + # metadata_unique_values["classes"] = list(classes) + + audios = list() # list of path to audio files + audio_infos = list() + audio_encodings = list() + metadata = list() # list of metadata + + annotated_duration = list() # total duration of annotated regions (per file) + annotated_regions = list() # annotated regions + annotations = list() # actual annotations + annotated_classes = list() # list of annotated classes (per file) + unique_labels = list() + + if self.has_validation: + files_iter = itertools.chain( + self.protocol.train(), self.protocol.development() + ) + else: + files_iter = self.protocol.train() + + for file_id, file in enumerate(files_iter): + # gather metadata and update metadata_unique_values so that each metadatum + # (e.g. source database or label) is represented by an integer. 
+ metadatum = dict() + + # keep track of source database and subset (train, development, or test) + if file["database"] not in metadata_unique_values["database"]: + metadata_unique_values["database"].append(file["database"]) + metadatum["database"] = metadata_unique_values["database"].index( + file["database"] + ) + metadatum["subset"] = Subsets.index(file["subset"]) + + # keep track of speaker label scope (file, database, or global) for speaker diarization protocols + if isinstance(self.protocol, SpeakerDiarizationProtocol): + metadatum["scope"] = Scopes.index(file["scope"]) + + # keep track of list of classes for regular segmentation protocols + # Different files may be annotated using a different set of classes + # (e.g. one database for speech/music/noise, and another one for male/female/child) + if isinstance(self.protocol, SegmentationProtocol): + if "classes" in file: + local_classes = file["classes"] + else: + local_classes = file["annotation"].labels() + + # if task was not initialized with a fixed list of classes, + # we build it as the union of all classes found in files + if self.classes is None: + for klass in local_classes: + if klass not in classes: + classes.append(klass) + annotated_classes.append( + [classes.index(klass) for klass in local_classes] + ) + + # if task was initialized with a fixed list of classes, + # we make sure that all files use a subset of these classes + # if they don't, we issue a warning and ignore the extra classes + else: + extra_classes = set(local_classes) - set(self.classes) + if extra_classes: + warnings.warn( + f"Ignoring extra classes ({', '.join(extra_classes)}) found for file {file['uri']} ({file['database']}). " + ) + annotated_classes.append( + [ + self.classes.index(klass) + for klass in set(local_classes) & set(self.classes) + ] + ) + + remaining_metadata_keys = set(file) - set( + [ + "uri", + "database", + "subset", + "audio", + "torchaudio.info", + "scope", + "classes", + "annotation", + "annotated", + ] + ) + + # keep track of any other (integer or string) metadata provided by the protocol + # (e.g. a "domain" key for domain-adversarial training) + for key in remaining_metadata_keys: + value = file[key] + + if isinstance(value, str): + if value not in metadata_unique_values[key]: + metadata_unique_values[key].append(value) + metadatum[key] = metadata_unique_values[key].index(value) + + elif isinstance(value, int): + metadatum[key] = value + + else: + warnings.warn( + f"Ignoring '{key}' metadata because of its type ({type(value)}). 
Only str and int are supported for now.", + category=UserWarning, + ) + + metadata.append(metadatum) + + database_unique_labels = list() + + # reset list of file-scoped labels + file_unique_labels = list() + + # path to audio file + audios.append(str(file["audio"])) + + # audio info + audio_info = file["torchaudio.info"] + audio_infos.append( + ( + audio_info.sample_rate, # sample rate + audio_info.num_frames, # number of frames + audio_info.num_channels, # number of channels + audio_info.bits_per_sample, # bits per sample + ) + ) + audio_encodings.append(audio_info.encoding) # encoding + + # annotated regions and duration + _annotated_duration = 0.0 + for segment in file["annotated"]: + # skip annotated regions that are shorter than training chunk duration + if segment.duration < duration: + continue + + # append annotated region + annotated_region = ( + file_id, + segment.duration, + segment.start, + segment.end, + ) + annotated_regions.append(annotated_region) + + # increment annotated duration + _annotated_duration += segment.duration + + # append annotated duration + annotated_duration.append(_annotated_duration) + + # annotations + for segment, _, label in file["annotation"].itertracks(yield_label=True): + # "scope" is provided by speaker diarization protocols to indicate + # whether speaker labels are local to the file ('file'), consistent across + # all files in a database ('database'), or globally consistent ('global') + + if "scope" in file: + # 0 = 'file' + # 1 = 'database' + # 2 = 'global' + scope = Scopes.index(file["scope"]) + + # update list of file-scope labels + if label not in file_unique_labels: + file_unique_labels.append(label) + # and convert label to its (file-scope) index + file_label_idx = file_unique_labels.index(label) + + database_label_idx = global_label_idx = -1 + + if scope > 0: # 'database' or 'global' + # update list of database-scope labels + if label not in database_unique_labels: + database_unique_labels.append(label) + + # and convert label to its (database-scope) index + database_label_idx = database_unique_labels.index(label) + + if scope > 1: # 'global' + # update list of global-scope labels + if label not in unique_labels: + unique_labels.append(label) + # and convert label to its (global-scope) index + global_label_idx = unique_labels.index(label) + + # basic segmentation protocols do not provide "scope" information + # as classes are global by definition + + else: + try: + file_label_idx = ( + database_label_idx + ) = global_label_idx = classes.index(label) + except ValueError: + # skip labels that are not in the list of classes + continue + + annotations.append( + ( + file_id, # index of file + segment.start, # start time + segment.end, # end time + file_label_idx, # file-scope label index + database_label_idx, # database-scope label index + global_label_idx, # global-scope index + ) + ) + + # since not all metadata keys are present in all files, fallback to -1 when a key is missing + metadata = [ + tuple(metadatum.get(key, -1) for key in metadata_unique_values) + for metadatum in metadata + ] + dtype = [(key, "i") for key in metadata_unique_values] + self.metadata = np.array(metadata, dtype=dtype) + + # NOTE: read with str(self.audios[file_id], encoding='utf-8') + self.audios = np.array(audios, dtype=np.string_) + + # turn list of files metadata into a single numpy array + # TODO: improve using https://github.com/pytorch/pytorch/issues/13246#issuecomment-617140519 + + dtype = [ + ("sample_rate", "i"), + ("num_frames", "i"), + ("num_channels", 
"i"), + ("bits_per_sample", "i"), + ] + self.audio_infos = np.array(audio_infos, dtype=dtype) + self.audio_encodings = np.array(audio_encodings, dtype=np.string_) + + self.annotated_duration = np.array(annotated_duration) + + # turn list of annotated regions into a single numpy array + dtype = [("file_id", "i"), ("duration", "f"), ("start", "f"), ("end", "f")] + self.annotated_regions = np.array(annotated_regions, dtype=dtype) + + # convert annotated_classes (which is a list of list of classes, one list of classes per file) + # into a single (num_files x num_classes) numpy array: + # * True indicates that this particular class was annotated for this particular file (though it may not be active in this file) + # * False indicates that this particular class was not even annotated (i.e. its absence does not imply that it is not active in this file) + if isinstance(self.protocol, SegmentationProtocol) and self.classes is None: + self.classes = classes + self.annotated_classes = np.zeros( + (len(annotated_classes), len(self.classes)), dtype=np.bool_ + ) + for file_id, classes in enumerate(annotated_classes): + self.annotated_classes[file_id, classes] = True + + # turn list of annotations into a single numpy array + dtype = [ + ("file_id", "i"), + ("start", "f"), + ("end", "f"), + ("file_label_idx", "i"), + ("database_label_idx", "i"), + ("global_label_idx", "i"), + ] + self.annotations = np.array(annotations, dtype=dtype) + + self.metadata_unique_values = metadata_unique_values + + if not self.has_validation: + return + + validation_chunks = list() + + # obtain indexes of files in the validation subset + validation_file_ids = np.where( + self.metadata["subset"] == Subsets.index("development") + )[0] + + # iterate over files in the validation subset + for file_id in validation_file_ids: + # get annotated regions in file + annotated_regions = self.annotated_regions[ + self.annotated_regions["file_id"] == file_id + ] + + # iterate over annotated regions + for annotated_region in annotated_regions: + # number of chunks in annotated region + num_chunks = round((annotated_region["duration"] - self.duration) // self.step) + print(annotated_region["duration"]) + print(num_chunks) + + # iterate over chunks + for c in range(num_chunks//8): + start_time = annotated_region["start"] + c * self.step + validation_chunks.append((file_id, start_time, duration)) + + dtype = [("file_id", "i"), ("start", "f"), ("duration", "f")] + self.validation_chunks = np.array(validation_chunks, dtype=dtype) + + + def prepare_chunk(self, file_id: int, start_time: float, duration: float, number: int = 2): + """Prepare chunk + Parameters + ---------- + file_id : int + File index + start_time : float + Chunk start time + duration : float + Chunk duration. + Returns + ------- + sample : dict + Dictionary containing the chunk data with the following keys: + - `X`: waveform + - `y`: target as a SlidingWindowFeature instance where y.labels is + in meta.scope space. 
+ - `meta`: + - `scope`: target scope (0: file, 1: database, 2: global) + - `database`: database index + - `file`: file index + """ + + file = self.get_file(file_id) + # get label scope + + label_scope = Scopes[self.metadata[file_id]["scope"]] + label_scope_key = f"{label_scope}_label_idx" + + # + chunk = Segment(start_time, start_time + duration) + + sample = dict() + sample["X"], _ = self.model.audio.crop(file, chunk, duration=duration) + + # gather all annotations of current file + annotations = self.annotations[self.annotations["file_id"] == file_id] + + # gather all annotations with non-empty intersection with current chunk + chunk_annotations = annotations[ + (annotations["start"] < chunk.end) & (annotations["end"] > chunk.start) + ] + + # discretize chunk annotations at model output resolution + start = np.maximum(chunk_annotations["start"], chunk.start) - chunk.start + start_idx = np.floor(start / self.model.example_output.frames.step).astype( + int + ) + end = np.minimum(chunk_annotations["end"], chunk.end) - chunk.start + end_idx = np.ceil(end / self.model.example_output.frames.step).astype(int) + + # get list and number of labels for current scope + labels = list(np.unique(chunk_annotations[label_scope_key])) + num_labels = len(labels) + + if num_labels > self.max_speakers_per_chunk: + pass + + # initial frame-level targets + num_chunks_per_file = getattr(self, "num_chunks_per_file", 1) + y = np.zeros((self.model.example_output.num_frames, num_labels), dtype=np.uint8) + + # map labels to indices + mapping = {label: idx for idx, label in enumerate(labels)} + + for start, end, label in zip( + start_idx, end_idx, chunk_annotations[label_scope_key] + ): + mapped_label = mapping[label] + y[start:end, mapped_label] = 1 + + sample["y"] = SlidingWindowFeature( + y, self.model.example_output.frames, labels=labels + ) + + metadata = self.metadata[file_id] + sample["meta"] = {key: metadata[key] for key in metadata.dtype.names} + sample["meta"]["file"] = file_id + + sample["number"] = number + + return sample + + def collate_y(self, batch) -> torch.Tensor: + """ + Parameters + ---------- + batch : list + List of samples to collate. + "y" field is expected to be a SlidingWindowFeature. + Returns + ------- + y : torch.Tensor + Collated target tensor of shape (batch_size, num_frames, self.max_speakers_per_chunk) + If one chunk has more than `self.max_speakers_per_chunk` speakers, we keep + the max_speakers_per_chunk most talkative ones. If it has less, we pad with + zeros (artificial inactive speakers). + """ + + collated_y = [] + for b in batch: + y = b["y"].data + num_speakers = len(b["y"].labels) + + # keep only the most talkative speakers + if num_speakers > self.max_speakers_per_chunk: + # sort speakers in descending talkativeness order + indices = np.argsort(-np.sum(y, axis=0), axis=0) + y = y[:, indices[: self.max_speakers_per_chunk]] + + elif num_speakers < self.max_speakers_per_chunk: + # create inactive speakers by zero padding + y = np.pad( + y, + ((0, 0), (0, self.max_speakers_per_chunk - num_speakers)), + mode="constant", + ) + + else: + # we have exactly the right number of speakers + pass + + # shuffle speaker indices (to avoid having them sorted in talkativeness decreasing order) as + # the model might otherwise infer prior probabilities from the order of the speakers. we do + # not want this information (partly computed from the second half of the chunk) to leak. 
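# A standalone sketch of the collation policy described above (keep the most
# talkative speakers up to max_speakers_per_chunk, zero-pad when there are fewer,
# then shuffle the speaker axis). Shapes, values and the helper name below are
# assumptions for illustration, not taken from the actual task configuration:
import numpy as np

def collate_targets(y, max_speakers, rng):
    """y: (num_frames, num_speakers) binary speaker-activity matrix."""
    num_speakers = y.shape[1]
    if num_speakers > max_speakers:
        # keep the `max_speakers` most talkative speakers
        order = np.argsort(-y.sum(axis=0))
        y = y[:, order[:max_speakers]]
    elif num_speakers < max_speakers:
        # pad with artificial, always-inactive speakers
        y = np.pad(y, ((0, 0), (0, max_speakers - num_speakers)))
    # shuffle speaker columns so that their order carries no information
    return y[:, rng.permutation(max_speakers)]

rng = np.random.default_rng(0)
chunk_y = rng.integers(0, 2, size=(50, 5))                       # 50 frames, 5 speakers (made up)
print(collate_targets(chunk_y, max_speakers=3, rng=rng).shape)   # (50, 3)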
+ np.random.shuffle(y.T) + + collated_y.append(y) + + return torch.from_numpy(np.stack(collated_y)) + + def collate_number(self, batch) -> torch.Tensor: + return default_collate([b["number"] for b in batch]) + + + def collate_fn(self, batch, stage="train"): + # collate X + collated_X = self.collate_X(batch) + + # collate y + collated_y = self.collate_y(batch) + + # collate metadata + collated_meta = self.collate_meta(batch) + + collated_number = self.collate_number(batch) + + + # apply augmentation (only in "train" stage) + self.augmentation.train(mode=(stage == "train")) + augmented = self.augmentation( + samples=collated_X, + sample_rate=self.model.hparams.sample_rate, + targets=collated_y.unsqueeze(1), + ) + + return { + "X": augmented.samples, + "y": augmented.targets.squeeze(1), + "meta": collated_meta, + "number": collated_number + } + + def segmentation_loss( + self, + permutated_prediction: torch.Tensor, + target: torch.Tensor, + weight: torch.Tensor = None, + ) -> torch.Tensor: + """Permutation-invariant segmentation loss + Parameters + ---------- + permutated_prediction : (batch_size, num_frames, num_classes) torch.Tensor + Permutated speaker activity predictions. + target : (batch_size, num_frames, num_speakers) torch.Tensor + Speaker activity. + weight : (batch_size, num_frames, 1) torch.Tensor, optional + Frames weight. + Returns + -------freedom + seg_loss : torch.Tensor + Permutation-invariant segmentation loss + """ + + return nll_loss( + permutated_prediction, + torch.argmax(target, dim=-1), + weight=weight, + ) + + def train__iter__helper(self, rng: random.Random, **filters): + """Iterate over training samples with optional domain filtering + + Parameters + ---------- + rng : random.Random + Random number generator + filters : dict, optional + When provided (as {key: value} dict), filter training files so that + only files such as file[key] == value are used for generating chunks. + + Yields + ------ + chunk : dict + Training chunks. 
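# Once the target has been permutated and mapped to powerset classes, the
# segmentation loss above reduces to a frame-wise negative log-likelihood.
# A rough plain-torch approximation (the task itself uses pyannote's nll_loss
# helper, an optional frame weight and real model outputs; sizes are made up):
import torch
import torch.nn.functional as F

batch, frames, powerset_classes = 2, 100, 7
log_probs = F.log_softmax(torch.randn(batch, frames, powerset_classes), dim=-1)
target = F.one_hot(torch.randint(powerset_classes, (batch, frames)),
                   num_classes=powerset_classes).float()

# one-hot powerset target -> class indices, then frame-wise NLL
loss = F.nll_loss(log_probs.view(-1, powerset_classes),
                  torch.argmax(target, dim=-1).view(-1))
print(loss.item())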
+ """ + + # indices of training files that matches domain filters + training = self.metadata["subset"] == Subsets.index("train") + for key, value in filters.items(): + training &= self.metadata[key] == self.metadata_unique_values[key].index(value) + file_ids = np.where(training)[0] + + # turn annotated duration into a probability distribution + annotated_duration = self.annotated_duration[file_ids] + prob_annotated_duration = annotated_duration / np.sum(annotated_duration) + + duration = self.duration + + num_chunks_per_file = getattr(self, "num_chunks_per_file", 1) + while True: + # select one file at random (with probability proportional to its annotated duration) + file_id = np.random.choice(file_ids, p=prob_annotated_duration) + + # generate `num_chunks_per_file` chunks from this file + for _ in range(num_chunks_per_file): + # find indices of annotated regions in this file + annotated_region_indices = np.where( + self.annotated_regions["file_id"] == file_id + )[0] + + # turn annotated regions duration into a probability distribution + prob_annotated_regions_duration = self.annotated_regions["duration"][ + annotated_region_indices + ] / np.sum(self.annotated_regions["duration"][annotated_region_indices]) + + # selected one annotated region at random (with probability proportional to its duration) + annotated_region_index = np.random.choice( + annotated_region_indices, p=prob_annotated_regions_duration + ) + + # select one chunk at random in this annotated region + _, _, start, end = self.annotated_regions[annotated_region_index] + start_time = rng.uniform(start, end - duration - self.step) + + yield self.prepare_chunk(file_id, start_time, duration,0) + yield self.prepare_chunk(file_id, start_time + self.step, duration,1) + + + def training_step(self, batch, batch_idx: int): + """Compute permutation-invariant segmentation loss + Parameters + ---------- + batch : (usually) dict of torch.Tensor + Current batch. + batch_idx: int + Batch index. 
+ Returns + ------- + loss : {str: torch.tensor} + {"loss": loss} + """ + # target + target_multilabel = batch["y"] + # (batch_size, num_frames, num_speakers) + + waveform = batch["X"] + # (batch_size, num_channels, num_samples) + + # drop samples that contain too many speakers + num_speakers: torch.Tensor = torch.sum( + torch.any(target_multilabel, dim=1), dim=1 + ) + keep: torch.Tensor = num_speakers <= self.max_speakers_per_chunk + target_multilabel = target_multilabel[keep] + waveform = waveform[keep] + + # corner case + if not keep.any(): + return {"loss": 0.0} + + target_powerset = self.model.powerset.to_powerset(target_multilabel.float()) + batch_size = target_powerset.size(0) + num_frames = target_powerset.size(1) + num_speakers = target_powerset.size(2) + + + + #create the guide with the same size as the targets (log because predictions are also log) + guide = torch.log(torch.full(target_powerset.size(), fill_value=1/num_speakers, device=target_multilabel.device)) + guide_length = self.duration - self.step + guide_length = int(np.floor(num_frames * guide_length / self.duration)) # round down + + #No guide forward pass + no_guide_predictions_powerset = self.model(waveform) + # permutate target in multilabel space and convert it to powerset space + no_guide_predictions_multilabel = self.model.powerset.to_multilabel(no_guide_predictions_powerset) + permutated_target_multilabel, _ = permutate(no_guide_predictions_multilabel, target_multilabel) + permutated_target_powerset = self.model.powerset.to_powerset(permutated_target_multilabel.float()) + + # compute loss in powerset space (between soft prediction and permutated target) + no_guide_loss = self.segmentation_loss(no_guide_predictions_powerset, permutated_target_powerset) + + #Even forward pass when initializing + #We associate every even batch elements with a guide from the targets + guide[0:batch_size:2, :guide_length,:] = target_powerset[0:batch_size:2, :guide_length,:] + even_predictions_powerset = self.model(waveform[0:batch_size:2], guide=guide[0:batch_size:2]) + # permutate target in multilabel space and # compute loss in powerset space + even_predictions_multilabel = self.model.powerset.to_multilabel(even_predictions_powerset) + permutated_target_multilabel, _ = permutate(even_predictions_multilabel, target_multilabel[1:batch_size:2]) + permutated_target_powerset = self.model.powerset.to_powerset(permutated_target_multilabel.float()) + + # compute loss for the last frames (where there is no target guide) + end_even_loss = self.segmentation_loss(even_predictions_powerset[:, guide_length:, :], permutated_target_powerset[:, guide_length:, :]) + + #Odd forward pass + #We associate every odd batch elements with a guide from the even predictions (here no guide even predictions but could be from target-guided even predictions) + guide[1:batch_size:2, :guide_length,:] = no_guide_predictions_powerset[0:batch_size:2, num_frames-guide_length:, :] + odd_predictions_powerset = self.model(waveform[1:batch_size:2], guide=guide[1:batch_size:2]) + # permutate target in multilabel space and convert it to powerset space + odd_predictions_multilabel = self.model.powerset.to_multilabel(odd_predictions_powerset) + permutated_target_multilabel, _ = permutate(odd_predictions_multilabel, target_multilabel[1:batch_size:2]) + permutated_target_powerset = self.model.powerset.to_powerset(permutated_target_multilabel.float()) + + # compute loss in powerset space (between soft prediction and permutated target) + guided_loss = 
self.segmentation_loss(odd_predictions_powerset, permutated_target_powerset) + + + #Now we stack the even and the odd predictions + # soft_prediction_powerset = torch.stack([even_prediction_powerset, odd_prediction_powerset], dim=1) + # soft_prediction_powerset = soft_prediction_powerset.reshape(batch_size, num_frames, even_prediction_powerset.size(2)) + + # decide what pass to use in final loss + end_even_loss = 0 + seg_loss = no_guide_loss + end_even_loss + guided_loss + + self.model.log( + "loss/train/segmentation", + seg_loss, + on_step=False, + on_epoch=True, + prog_bar=False, + logger=True, + ) + + loss = seg_loss + + self.model.log( + "loss/train", + loss, + on_step=False, + on_epoch=True, + prog_bar=False, + logger=True, + ) + + return {"loss": loss} + + def default_metric( + self, + ) -> Union[Metric, Sequence[Metric], Dict[str, Metric]]: + """Returns diarization error rate and its components""" + + if self.specifications.powerset: + return { + "DiarizationErrorRate": DiarizationErrorRate(0.5), + + "DiarizationErrorRate/Confusion": SpeakerConfusionRate(0.5), + "DiarizationErrorRate/Miss": MissedDetectionRate(0.5), + "DiarizationErrorRate/FalseAlarm": FalseAlarmRate(0.5), + } + + return { + "DiarizationErrorRate": OptimalDiarizationErrorRate(), + "DiarizationErrorRate/Threshold": OptimalDiarizationErrorRateThreshold(), + "DiarizationErrorRate/Confusion": OptimalSpeakerConfusionRate(), + "DiarizationErrorRate/Miss": OptimalMissedDetectionRate(), + "DiarizationErrorRate/FalseAlarm": OptimalFalseAlarmRate(), + } + + + def val__getitem__(self, idx): + validation_chunk = self.validation_chunks[idx] + return self.prepare_chunk( + validation_chunk["file_id"], + validation_chunk["start"], + duration=validation_chunk["duration"], + number=idx + ) + + # TODO: no need to compute gradient in this method + def validation_step(self, batch, batch_idx: int): + """Compute validation loss and metric + Parameters + ---------- + batch : dict of torch.Tensor + Current batch. + batch_idx: int + Batch index. 
+ """ + print(batch["number"]) + # target + target = batch["y"] + # (batch_size, num_frames, num_speakers) + + waveform = batch["X"] + # (batch_size, num_channels, num_samples) + + # TODO: should we handle validation samples with too many speakers + # waveform = waveform[keep] + # target = target[keep] + target_powerset = self.model.powerset.to_powerset(target.float()) + batch_size = target_powerset.size(0) + num_frames = target_powerset.size(1) + num_speakers = target_powerset.size(2) + + + #create the guide with the same size as the targets + guide = torch.log(torch.full(target_powerset.size(), fill_value=1/num_speakers, device=target.device)) + guide_length = self.duration - self.step + guide_length = int(np.floor(num_frames * guide_length / self.duration)) # round down + + + predictions_powerset = torch.zeros(target_powerset.size(), device=target.device) + predictions_powerset[0] = self.model(waveform[0:1]) + for i in range(1, batch_size): + guide[i, :guide_length] = predictions_powerset[i-1, num_frames-guide_length:] + predictions_powerset[i] = self.model(waveform[i:i+1], guide[i:i+1]) + + multilabel = self.model.powerset.to_multilabel(predictions_powerset) + permutated_target, _ = permutate(multilabel, target) + permutated_target_powerset = self.model.powerset.to_powerset(permutated_target.float()) + + seg_loss = self.segmentation_loss(predictions_powerset[1:], permutated_target_powerset[1:]) + + self.model.log( + "loss/val/segmentation", + seg_loss, + on_step=False, + on_epoch=True, + prog_bar=False, + logger=True, + ) + + self.model.validation_metric( + torch.transpose(multilabel, 1, 2), + torch.transpose(target, 1, 2), + ) + + self.model.log_dict( + self.model.validation_metric, + on_step=False, + on_epoch=True, + prog_bar=True, + logger=True, + ) + + # log first batch visualization every 2^n epochs. 
+ if ( + self.model.current_epoch == 0 + or math.log2(self.model.current_epoch) % 1 > 0 + or batch_idx > 0 + ): + return + + # visualize first 9 validation samples of first batch in Tensorboard/MLflow + + y = permutated_target.float().cpu().numpy() + y_pred = multilabel.cpu().numpy() + + # prepare 3 x 3 grid (or smaller if batch size is smaller) + num_samples = min(self.batch_size, 9) + nrows = math.ceil(math.sqrt(num_samples)) + ncols = math.ceil(num_samples / nrows) + fig, axes = plt.subplots( + nrows=2 * nrows, ncols=ncols, figsize=(8, 5), squeeze=False + ) + + # reshape target so that there is one line per class when plotting it + y[y == 0] = np.NaN + if len(y.shape) == 2: + y = y[:, :, np.newaxis] + y *= np.arange(y.shape[2]) + + # plot each sample + for sample_idx in range(num_samples): + # find where in the grid it should be plotted + row_idx = sample_idx // nrows + col_idx = sample_idx % ncols + + # plot target + ax_ref = axes[row_idx * 2 + 0, col_idx] + sample_y = y[sample_idx] + ax_ref.plot(sample_y) + ax_ref.set_xlim(0, len(sample_y)) + ax_ref.set_ylim(-1, sample_y.shape[1]) + ax_ref.get_xaxis().set_visible(False) + ax_ref.get_yaxis().set_visible(False) + + # plot predictions + ax_hyp = axes[row_idx * 2 + 1, col_idx] + sample_y_pred = y_pred[sample_idx] + ax_hyp.plot(sample_y_pred) + ax_hyp.set_ylim(-0.1, 1.1) + ax_hyp.set_xlim(0, len(sample_y)) + ax_hyp.get_xaxis().set_visible(False) + + plt.tight_layout() + + for logger in self.model.loggers: + if isinstance(logger, TensorBoardLogger): + logger.experiment.add_figure("samples", fig, self.model.current_epoch) + elif isinstance(logger, MLFlowLogger): + logger.experiment.log_figure( + run_id=logger.run_id, + figure=fig, + artifact_file=f"samples_epoch{self.model.current_epoch}.png", + ) + + plt.close(fig) \ No newline at end of file From b603690ab559b0e53d7a32b94f9754775a6eb688 Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Thu, 8 Feb 2024 16:51:36 +0100 Subject: [PATCH 12/23] create a guided inference to use with a guided model --- pyannote/audio/core/guided_inference.py | 847 ++++++++++++++++++++++++ 1 file changed, 847 insertions(+) create mode 100644 pyannote/audio/core/guided_inference.py diff --git a/pyannote/audio/core/guided_inference.py b/pyannote/audio/core/guided_inference.py new file mode 100644 index 000000000..df7ec9899 --- /dev/null +++ b/pyannote/audio/core/guided_inference.py @@ -0,0 +1,847 @@ +# MIT License +# +# Copyright (c) 2020- CNRS +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
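# Rough usage sketch for the GuidedInference class defined below (illustration
# only: the checkpoint path and chunk settings are hypothetical placeholders,
# and the model is assumed to expose the optional `guide` input added in this
# patch series):
import torch
from pyannote.audio.core.guided_inference import GuidedInference

inference = GuidedInference(
    "path/to/guided-segmentation.ckpt",   # hypothetical checkpoint trained with a guide input
    duration=5.0,                         # should match the training chunk duration
    step=0.5,                             # the duration - step overlap feeds the guide
    device=torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu"),
)
activations = inference("audio.wav")      # SlidingWindowFeature of frame-level activities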
+ +import math +import warnings +from pathlib import Path +from typing import Callable, List, Optional, Text, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange +from pyannote.core import Segment, SlidingWindow, SlidingWindowFeature +from pytorch_lightning.utilities.memory import is_oom_error + +from pyannote.audio.core.io import AudioFile +from pyannote.audio.core.model import Model, Specifications +from pyannote.audio.core.task import Resolution +from pyannote.audio.utils.multi_task import map_with_specifications +from pyannote.audio.utils.permutation import mae_cost_func, permutate +from pyannote.audio.utils.powerset import Powerset +from pyannote.audio.utils.reproducibility import fix_reproducibility + + +class BaseInference: + pass + + +class GuidedInference(BaseInference): + """Inference + + Parameters + ---------- + model : Model + Model. Will be automatically set to eval() mode and moved to `device` when provided. + window : {"sliding", "whole"}, optional + Use a "sliding" window and aggregate the corresponding outputs (default) + or just one (potentially long) window covering the "whole" file or chunk. + duration : float, optional + Chunk duration, in seconds. Defaults to duration used for training the model. + Has no effect when `window` is "whole". + step : float, optional + Step between consecutive chunks, in seconds. Defaults to warm-up duration when + greater than 0s, otherwise 10% of duration. Has no effect when `window` is "whole". + pre_aggregation_hook : callable, optional + When a callable is provided, it is applied to the model output, just before aggregation. + Takes a (num_chunks, num_frames, dimension) numpy array as input and returns a modified + (num_chunks, num_frames, other_dimension) numpy array passed to overlap-add aggregation. + skip_aggregation : bool, optional + Do not aggregate outputs when using "sliding" window. Defaults to False. + skip_conversion: bool, optional + In case a task has been trained with `powerset` mode, output is automatically + converted to `multi-label`, unless `skip_conversion` is set to True. + batch_size : int, optional + Batch size. Larger values (should) make inference faster. Defaults to 32. + device : torch.device, optional + Device used for inference. Defaults to `model.device`. + In case `device` and `model.device` are different, model is sent to device. 
+ use_auth_token : str, optional + When loading a private huggingface.co model, set `use_auth_token` + to True or to a string containing your hugginface.co authentication + token that can be obtained by running `huggingface-cli login` + """ + + def __init__( + self, + model: Union[Model, Text, Path], + window: Text = "sliding", + duration: float = None, + step: float = None, + pre_aggregation_hook: Callable[[np.ndarray], np.ndarray] = None, + skip_aggregation: bool = False, + skip_conversion: bool = False, + device: torch.device = None, + batch_size: int = 32, + use_auth_token: Union[Text, None] = None, + ): + # ~~~~ model ~~~~~ + + self.model = ( + model + if isinstance(model, Model) + else Model.from_pretrained( + model, + map_location=device, + strict=False, + use_auth_token=use_auth_token, + ) + ) + + if device is None: + device = self.model.device + self.device = device + + self.model.eval() + self.model.to(self.device) + + specifications = self.model.specifications + + # ~~~~ sliding window ~~~~~ + + if window not in ["sliding", "whole"]: + raise ValueError('`window` must be "sliding" or "whole".') + + if window == "whole" and any( + s.resolution == Resolution.FRAME for s in specifications + ): + warnings.warn( + 'Using "whole" `window` inference with a frame-based model might lead to bad results ' + 'and huge memory consumption: it is recommended to set `window` to "sliding".' + ) + self.window = window + + training_duration = next(iter(specifications)).duration + duration = duration or training_duration + if training_duration != duration: + warnings.warn( + f"Model was trained with {training_duration:g}s chunks, and you requested " + f"{duration:g}s chunks for inference: this might lead to suboptimal results." + ) + self.duration = duration + + # ~~~~ powerset to multilabel conversion ~~~~ + + self.skip_conversion = skip_conversion + + conversion = list() + for s in specifications: + if s.powerset and not skip_conversion: + c = Powerset(len(s.classes), s.powerset_max_classes) + else: + c = nn.Identity() + conversion.append(c.to(self.device)) + + if isinstance(specifications, Specifications): + self.conversion = conversion[0] + else: + self.conversion = nn.ModuleList(conversion) + + # ~~~~ overlap-add aggregation ~~~~~ + + self.skip_aggregation = skip_aggregation + self.pre_aggregation_hook = pre_aggregation_hook + + self.warm_up = next(iter(specifications)).warm_up + # Use that many seconds on the left- and rightmost parts of each chunk + # to warm up the model. While the model does process those left- and right-most + # parts, only the remaining central part of each chunk is used for aggregating + # scores during inference. + + # step between consecutive chunks + step = step or ( + 0.1 * self.duration if self.warm_up[0] == 0.0 else self.warm_up[0] + ) + + if step > self.duration: + raise ValueError( + f"Step between consecutive chunks is set to {step:g}s, while chunks are " + f"only {self.duration:g}s long, leading to gaps between consecutive chunks. " + f"Either decrease step or increase duration." 
+ ) + self.step = step + + self.batch_size = batch_size + + def to(self, device: torch.device) -> "Inference": + """Send internal model to `device`""" + + if not isinstance(device, torch.device): + raise TypeError( + f"`device` must be an instance of `torch.device`, got `{type(device).__name__}`" + ) + + self.model.to(device) + self.conversion.to(device) + self.device = device + return self + + def infer(self, chunks: torch.Tensor) -> Union[np.ndarray, Tuple[np.ndarray]]: + """Forward pass + + Takes care of sending chunks to right device and outputs back to CPU + + Parameters + ---------- + chunks : (batch_size, num_channels, num_samples) torch.Tensor + Batch of audio chunks. + + Returns + ------- + outputs : (tuple of) (batch_size, ...) np.ndarray + Model output. + """ + + with torch.inference_mode(): + try: + outputs = self.model(chunks.to(self.device)) + except RuntimeError as exception: + if is_oom_error(exception): + raise MemoryError( + f"batch_size ({self.batch_size: d}) is probably too large. " + f"Try with a smaller value until memory error disappears." + ) + else: + raise exception + + def __convert(output: torch.Tensor, conversion: nn.Module, **kwargs): + return conversion(output).cpu().numpy() + + return map_with_specifications( + self.model.specifications, __convert, outputs, self.conversion + ) + + def slide( + self, + waveform: torch.Tensor, + sample_rate: int, + hook: Optional[Callable], + ) -> Union[SlidingWindowFeature, Tuple[SlidingWindowFeature]]: + """Slide model on a waveform + + Parameters + ---------- + waveform: (num_channels, num_samples) torch.Tensor + Waveform. + sample_rate : int + Sample rate. + hook: Optional[Callable] + When a callable is provided, it is called everytime a batch is + processed with two keyword arguments: + - `completed`: the number of chunks that have been processed so far + - `total`: the total number of chunks + + Returns + ------- + output : (tuple of) SlidingWindowFeature + Model output. Shape is (num_chunks, dimension) for chunk-level tasks, + and (num_frames, dimension) for frame-level tasks. 
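# A small worked example (made-up numbers) of how much of each chunk the
# previous prediction can seed: with 5 s chunks and a 0.5 s step, consecutive
# chunks share 4.5 s, so roughly 90% of the output frames can be initialised
# from the preceding chunk's prediction:
import numpy as np

duration, step, num_frames = 5.0, 0.5, 293           # hypothetical model resolution
step_frames = int(round(num_frames * step / duration))
guide_frames = int(np.floor(num_frames * (duration - step) / duration))
print(step_frames, guide_frames)                     # 29 frames per step, 263 guided frames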
+ """ + step_frames = self.model.example_output.frames.closest_frame(self.step) + window_size: int = self.model.audio.get_num_samples(self.duration) + step_size: int = round(self.step * sample_rate) + _, num_samples = waveform.shape + + def __frames( + example_output, specifications: Optional[Specifications] = None + ) -> SlidingWindow: + if specifications.resolution == Resolution.CHUNK: + return SlidingWindow(start=0.0, duration=self.duration, step=self.step) + return example_output.frames + + frames: Union[SlidingWindow, Tuple[SlidingWindow]] = map_with_specifications( + self.model.specifications, + __frames, + self.model.example_output, + ) + + # prepare complete chunks + if num_samples >= window_size: + chunks: torch.Tensor = rearrange( + waveform.unfold(1, window_size, step_size), + "channel chunk frame -> chunk channel frame", + ) + num_chunks, _, _ = chunks.shape + else: + num_chunks = 0 + + # prepare last incomplete chunk + has_last_chunk = (num_samples < window_size) or ( + num_samples - window_size + ) % step_size > 0 + if has_last_chunk: + # pad last chunk with zeros + last_chunk: torch.Tensor = waveform[:, num_chunks * step_size :] + _, last_window_size = last_chunk.shape + last_pad = window_size - last_window_size + last_chunk = F.pad(last_chunk, (0, last_pad)) + + def __empty_list(**kwargs): + return list() + + outputs: Union[ + List[np.ndarray], Tuple[List[np.ndarray]] + ] = map_with_specifications(self.model.specifications, __empty_list) + + if hook is not None: + hook(completed=0, total=num_chunks + has_last_chunk) + + def __append_batch(output, batch_output, **kwargs) -> None: + output.append(batch_output) + return + + # slide over audio chunks in batch + _ = map_with_specifications( + self.model.specifications, __append_batch, outputs, self.model(chunks[0].to(self.device)).detach().cpu().numpy() + ) + + num_frames = outputs[0].shape[1] + num_speakers = outputs[0].shape[2] + guide = torch.log(torch.full((1,num_frames,num_speakers), fill_value=1/num_speakers, device=self.device)) + for c in np.arange(1, num_chunks): + guide[:,:num_frames-2*step_frames] = torch.from_numpy(outputs[c-1][:,step_frames:num_frames-step_frames]) + batch_outputs: Union[np.ndarray, Tuple[np.ndarray]] = self.model(chunks[c].to(self.device), guide) + + _ = map_with_specifications( + self.model.specifications, __append_batch, outputs, batch_outputs.detach().cpu().numpy() + ) + + if hook is not None: + hook(completed=c + self.batch_size, total=num_chunks + has_last_chunk) + + # process orphan last chunk + if has_last_chunk: + guide[:,:num_frames-2*step_frames] = torch.from_numpy(outputs[c-1][:,step_frames:num_frames-step_frames]) + last_outputs: Union[np.ndarray, Tuple[np.ndarray]] = self.model(last_chunk[None].to(self.device), guide) + + _ = map_with_specifications( + self.model.specifications, __append_batch, outputs, last_outputs.detach().cpu().numpy() + ) + + if hook is not None: + hook( + completed=num_chunks + has_last_chunk, + total=num_chunks + has_last_chunk, + ) + + def __vstack(output: List[np.ndarray], **kwargs) -> np.ndarray: + return np.vstack(output) + + outputs: Union[np.ndarray, Tuple[np.ndarray]] = map_with_specifications( + self.model.specifications, __vstack, outputs + ) + + powerset = Powerset(len(self.model.specifications.classes),num_speakers) + outputs = powerset.to_multilabel(torch.from_numpy(outputs)) + outputs = outputs.detach().cpu().numpy() + + def __aggregate( + outputs: np.ndarray, + frames: SlidingWindow, + specifications: Optional[Specifications] = None, + ) -> 
SlidingWindowFeature: + # skip aggregation when requested, + # or when model outputs just one vector per chunk + # or when model is permutation-invariant (and not post-processed) + if ( + self.skip_aggregation + or specifications.resolution == Resolution.CHUNK + or ( + specifications.permutation_invariant + and self.pre_aggregation_hook is None + ) + ): + frames = SlidingWindow( + start=0.0, duration=self.duration, step=self.step + ) + return SlidingWindowFeature(outputs, frames) + + if self.pre_aggregation_hook is not None: + outputs = self.pre_aggregation_hook(outputs) + + aggregated = self.aggregate( + SlidingWindowFeature( + outputs, + SlidingWindow(start=0.0, duration=self.duration, step=self.step), + ), + frames=frames, + warm_up=self.warm_up, + hamming=True, + missing=0.0, + ) + + # remove padding that was added to last chunk + if has_last_chunk: + aggregated.data = aggregated.crop( + Segment(0.0, num_samples / sample_rate), mode="loose" + ) + + return aggregated + + return map_with_specifications( + self.model.specifications, __aggregate, outputs, frames + ) + + def __call__( + self, file: AudioFile, hook: Optional[Callable] = None + ) -> Union[ + Tuple[Union[SlidingWindowFeature, np.ndarray]], + Union[SlidingWindowFeature, np.ndarray], + ]: + """Run inference on a whole file + + Parameters + ---------- + file : AudioFile + Audio file. + hook : callable, optional + When a callable is provided, it is called everytime a batch is processed + with two keyword arguments: + - `completed`: the number of chunks that have been processed so far + - `total`: the total number of chunks + + Returns + ------- + output : (tuple of) SlidingWindowFeature or np.ndarray + Model output, as `SlidingWindowFeature` if `window` is set to "sliding" + and `np.ndarray` if is set to "whole". + + """ + + fix_reproducibility(self.device) + + waveform, sample_rate = self.model.audio(file) + + if self.window == "sliding": + return self.slide(waveform, sample_rate, hook=hook) + + outputs: Union[np.ndarray, Tuple[np.ndarray]] = self.infer(waveform[None]) + + def __first_sample(outputs: np.ndarray, **kwargs) -> np.ndarray: + return outputs[0] + + return map_with_specifications( + self.model.specifications, __first_sample, outputs + ) + + def crop( + self, + file: AudioFile, + chunk: Union[Segment, List[Segment]], + duration: Optional[float] = None, + hook: Optional[Callable] = None, + ) -> Union[ + Tuple[Union[SlidingWindowFeature, np.ndarray]], + Union[SlidingWindowFeature, np.ndarray], + ]: + """Run inference on a chunk or a list of chunks + + Parameters + ---------- + file : AudioFile + Audio file. + chunk : Segment or list of Segment + Apply model on this chunk. When a list of chunks is provided and + window is set to "sliding", this is equivalent to calling crop on + the smallest chunk that contains all chunks. In case window is set + to "whole", this is equivalent to concatenating each chunk into one + (artifical) chunk before processing it. + duration : float, optional + Enforce chunk duration (in seconds). This is a hack to avoid rounding + errors that may result in a different number of audio samples for two + chunks of the same duration. 
+ hook : callable, optional + When a callable is provided, it is called everytime a batch is processed + with two keyword arguments: + - `completed`: the number of chunks that have been processed so far + - `total`: the total number of chunks + + Returns + ------- + output : (tuple of) SlidingWindowFeature or np.ndarray + Model output, as `SlidingWindowFeature` if `window` is set to "sliding" + and `np.ndarray` if is set to "whole". + + Notes + ----- + If model needs to be warmed up, remember to extend the requested chunk with the + corresponding amount of time so that it is actually warmed up when processing the + chunk of interest: + >>> chunk_of_interest = Segment(10, 15) + >>> extended_chunk = Segment(10 - warm_up, 15 + warm_up) + >>> inference.crop(file, extended_chunk).crop(chunk_of_interest, returns_data=False) + """ + + fix_reproducibility(self.device) + + if self.window == "sliding": + if not isinstance(chunk, Segment): + start = min(c.start for c in chunk) + end = max(c.end for c in chunk) + chunk = Segment(start=start, end=end) + + waveform, sample_rate = self.model.audio.crop( + file, chunk, duration=duration + ) + outputs: Union[ + SlidingWindowFeature, Tuple[SlidingWindowFeature] + ] = self.slide(waveform, sample_rate, hook=hook) + + def __shift(output: SlidingWindowFeature, **kwargs) -> SlidingWindowFeature: + frames = output.sliding_window + shifted_frames = SlidingWindow( + start=chunk.start, duration=frames.duration, step=frames.step + ) + return SlidingWindowFeature(output.data, shifted_frames) + + return map_with_specifications(self.model.specifications, __shift, outputs) + + if isinstance(chunk, Segment): + waveform, sample_rate = self.model.audio.crop( + file, chunk, duration=duration + ) + else: + waveform = torch.cat( + [self.model.audio.crop(file, c)[0] for c in chunk], dim=1 + ) + + outputs: Union[np.ndarray, Tuple[np.ndarray]] = self.infer(waveform[None]) + + def __first_sample(outputs: np.ndarray, **kwargs) -> np.ndarray: + return outputs[0] + + return map_with_specifications( + self.model.specifications, __first_sample, outputs + ) + + @staticmethod + def aggregate( + scores: SlidingWindowFeature, + frames: SlidingWindow = None, + warm_up: Tuple[float, float] = (0.0, 0.0), + epsilon: float = 1e-12, + hamming: bool = False, + missing: float = np.NaN, + skip_average: bool = False, + ) -> SlidingWindowFeature: + """Aggregation + + Parameters + ---------- + scores : SlidingWindowFeature + Raw (unaggregated) scores. Shape is (num_chunks, num_frames_per_chunk, num_classes). + frames : SlidingWindow, optional + Frames resolution. Defaults to estimate it automatically based on `scores` shape + and chunk size. Providing the exact frame resolution (when known) leads to better + temporal precision. + warm_up : (float, float) tuple, optional + Left/right warm up duration (in seconds). + missing : float, optional + Value used to replace missing (ie all NaNs) values. + skip_average : bool, optional + Skip final averaging step. + + Returns + ------- + aggregated_scores : SlidingWindowFeature + Aggregated scores. 
Shape is (num_frames, num_classes) + """ + + num_chunks, num_frames_per_chunk, num_classes = scores.data.shape + + chunks = scores.sliding_window + if frames is None: + duration = step = chunks.duration / num_frames_per_chunk + frames = SlidingWindow(start=chunks.start, duration=duration, step=step) + else: + frames = SlidingWindow( + start=chunks.start, + duration=frames.duration, + step=frames.step, + ) + + masks = 1 - np.isnan(scores) + scores.data = np.nan_to_num(scores.data, copy=True, nan=0.0) + + # Hamming window used for overlap-add aggregation + hamming_window = ( + np.hamming(num_frames_per_chunk).reshape(-1, 1) + if hamming + else np.ones((num_frames_per_chunk, 1)) + ) + + # anything before warm_up_left (and after num_frames_per_chunk - warm_up_right) + # will not be used in the final aggregation + + # warm-up windows used for overlap-add aggregation + warm_up_window = np.ones((num_frames_per_chunk, 1)) + # anything before warm_up_left will not contribute to aggregation + warm_up_left = round( + warm_up[0] / scores.sliding_window.duration * num_frames_per_chunk + ) + warm_up_window[:warm_up_left] = epsilon + # anything after num_frames_per_chunk - warm_up_right either + warm_up_right = round( + warm_up[1] / scores.sliding_window.duration * num_frames_per_chunk + ) + warm_up_window[num_frames_per_chunk - warm_up_right :] = epsilon + + # aggregated_output[i] will be used to store the sum of all predictions + # for frame #i + num_frames = ( + frames.closest_frame( + scores.sliding_window.start + + scores.sliding_window.duration + + (num_chunks - 1) * scores.sliding_window.step + ) + + 1 + ) + + print("inferencetot",num_frames) + + aggregated_output: np.ndarray = np.zeros( + (num_frames, num_classes), dtype=np.float32 + ) + + # overlapping_chunk_count[i] will be used to store the number of chunks + # that contributed to frame #i + overlapping_chunk_count: np.ndarray = np.zeros( + (num_frames, num_classes), dtype=np.float32 + ) + + # aggregated_mask[i] will be used to indicate whether + # at least one non-NAN frame contributed to frame #i + aggregated_mask: np.ndarray = np.zeros( + (num_frames, num_classes), dtype=np.float32 + ) + # loop on the scores of sliding chunks + for (chunk, score), (_, mask) in zip(scores, masks): + # chunk ~ Segment + # score ~ (num_frames_per_chunk, num_classes)-shaped np.ndarray + # mask ~ (num_frames_per_chunk, num_classes)-shaped np.ndarray + start_frame = frames.closest_frame(chunk.start) + if start_frame + num_frames_per_chunk > num_frames: + print("here") + else: + aggregated_output[start_frame : start_frame + num_frames_per_chunk] += ( + score * mask * hamming_window * warm_up_window + ) + + overlapping_chunk_count[ + start_frame : start_frame + num_frames_per_chunk + ] += (mask * hamming_window * warm_up_window) + + aggregated_mask[ + start_frame : start_frame + num_frames_per_chunk + ] = np.maximum( + aggregated_mask[start_frame : start_frame + num_frames_per_chunk], + mask, + ) + + if skip_average: + average = aggregated_output + else: + average = aggregated_output / np.maximum(overlapping_chunk_count, epsilon) + + average[aggregated_mask == 0.0] = missing + + return SlidingWindowFeature(average, frames) + + @staticmethod + def trim( + scores: SlidingWindowFeature, + warm_up: Tuple[float, float] = (0.1, 0.1), + ) -> SlidingWindowFeature: + """Trim left and right warm-up regions + + Parameters + ---------- + scores : SlidingWindowFeature + (num_chunks, num_frames, num_classes)-shaped scores. 
+ warm_up : (float, float) tuple + Left/right warm up ratio of chunk duration. + Defaults to (0.1, 0.1), i.e. 10% on both sides. + + Returns + ------- + trimmed : SlidingWindowFeature + (num_chunks, trimmed_num_frames, num_speakers)-shaped scores + """ + + assert ( + scores.data.ndim == 3 + ), "Inference.trim expects (num_chunks, num_frames, num_classes)-shaped `scores`" + _, num_frames, _ = scores.data.shape + + chunks = scores.sliding_window + + num_frames_left = round(num_frames * warm_up[0]) + num_frames_right = round(num_frames * warm_up[1]) + + num_frames_step = round(num_frames * chunks.step / chunks.duration) + if num_frames - num_frames_left - num_frames_right < num_frames_step: + warnings.warn( + f"Total `warm_up` is so large ({sum(warm_up) * 100:g}% of each chunk) " + f"that resulting trimmed scores does not cover a whole step ({chunks.step:g}s)" + ) + new_data = scores.data[:, num_frames_left : num_frames - num_frames_right] + + new_chunks = SlidingWindow( + start=chunks.start + warm_up[0] * chunks.duration, + step=chunks.step, + duration=(1 - warm_up[0] - warm_up[1]) * chunks.duration, + ) + + return SlidingWindowFeature(new_data, new_chunks) + + @staticmethod + def stitch( + activations: SlidingWindowFeature, + frames: SlidingWindow = None, + lookahead: Optional[Tuple[int, int]] = None, + cost_func: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None, + match_func: Callable[[np.ndarray, np.ndarray, float], bool] = None, + ) -> SlidingWindowFeature: + """ + + Parameters + ---------- + activations : SlidingWindowFeature + (num_chunks, num_frames, num_classes)-shaped scores. + frames : SlidingWindow, optional + Frames resolution. Defaults to estimate it automatically based on `activations` + shape and chunk size. Providing the exact frame resolution (when known) leads to better + temporal precision. + lookahead : (int, int) tuple + Number of past and future adjacent chunks to use for stitching. + Defaults to (k, k) with k = chunk_duration / chunk_step - 1 + cost_func : callable + Cost function used to find the optimal mapping between two chunks. + Expects two (num_frames, num_classes) torch.tensor as input + and returns cost as a (num_classes, ) torch.tensor + Defaults to mean absolute error (utils.permutations.mae_cost_func) + match_func : callable + Function used to decide whether two speakers mapped by the optimal + mapping actually are a match. + Expects two (num_frames, ) np.ndarray and the cost (from cost_func) + and returns a boolean. Defaults to always returning True. 
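# Sketch of the chunk-to-chunk alignment the stitching relies on: over the
# shared temporal support of two overlapping chunks, permutate() returns the
# one-to-one speaker mapping that minimises the mean absolute error (the toy
# activations and sizes below are made up):
import numpy as np
from pyannote.audio.utils.permutation import mae_cost_func, permutate

this_chunk = np.random.rand(50, 3)            # (num_frames, num_speakers)
that_chunk = this_chunk[:, [2, 0, 1]]         # same speakers, shuffled columns

_, (permutation,), (cost,) = permutate(
    this_chunk[np.newaxis], that_chunk, cost_func=mae_cost_func, return_cost=True
)
print(permutation)   # one-to-one mapping realigning the shuffled columns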
+ """ + + num_chunks, num_frames, num_classes = activations.data.shape + + chunks: SlidingWindow = activations.sliding_window + + if frames is None: + duration = step = chunks.duration / num_frames + frames = SlidingWindow(start=chunks.start, duration=duration, step=step) + else: + frames = SlidingWindow( + start=chunks.start, + duration=frames.duration, + step=frames.step, + ) + + max_lookahead = math.floor(chunks.duration / chunks.step - 1) + if lookahead is None: + lookahead = 2 * (max_lookahead,) + + assert all(L <= max_lookahead for L in lookahead) + + if cost_func is None: + cost_func = mae_cost_func + + if match_func is None: + + def always_match(this: np.ndarray, that: np.ndarray, cost: float): + return True + + match_func = always_match + + stitches = [] + for C, (chunk, activation) in enumerate(activations): + local_stitch = np.NAN * np.zeros( + (sum(lookahead) + 1, num_frames, num_classes) + ) + + for c in range( + max(0, C - lookahead[0]), min(num_chunks, C + lookahead[1] + 1) + ): + # extract common temporal support + shift = round((C - c) * num_frames * chunks.step / chunks.duration) + + if shift < 0: + shift = -shift + this_activations = activation[shift:] + that_activations = activations[c, : num_frames - shift] + else: + this_activations = activation[: num_frames - shift] + that_activations = activations[c, shift:] + + # find the optimal one-to-one mapping + _, (permutation,), (cost,) = permutate( + this_activations[np.newaxis], + that_activations, + cost_func=cost_func, + return_cost=True, + ) + + for this, that in enumerate(permutation): + # only stitch under certain condiditions + matching = (c == C) or ( + match_func( + this_activations[:, this], + that_activations[:, that], + cost[this, that], + ) + ) + + if matching: + local_stitch[c - C + lookahead[0], :, this] = activations[ + c, :, that + ] + + # TODO: do not lookahead further once a mismatch is found + + stitched_chunks = SlidingWindow( + start=chunk.start - lookahead[0] * chunks.step, + duration=chunks.duration, + step=chunks.step, + ) + + local_stitch = Inference.aggregate( + SlidingWindowFeature(local_stitch, stitched_chunks), + frames=frames, + hamming=True, + ) + + stitches.append(local_stitch.data) + + stitches = np.stack(stitches) + stitched_chunks = SlidingWindow( + start=chunks.start - lookahead[0] * chunks.step, + duration=chunks.duration + sum(lookahead) * chunks.step, + step=chunks.step, + ) + + return SlidingWindowFeature(stitches, stitched_chunks) + From 8838a6d26d292f48209430e0f4b94e769e360796 Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Thu, 8 Feb 2024 16:54:55 +0100 Subject: [PATCH 13/23] remove the second version of multilatency model to keep only the last one and add back monolithic flag --- .../segmentation/MultilatencyPyanNet.py | 58 ++++++++++--------- 1 file changed, 30 insertions(+), 28 deletions(-) diff --git a/pyannote/audio/models/segmentation/MultilatencyPyanNet.py b/pyannote/audio/models/segmentation/MultilatencyPyanNet.py index b6e3f2ffd..d1584857f 100644 --- a/pyannote/audio/models/segmentation/MultilatencyPyanNet.py +++ b/pyannote/audio/models/segmentation/MultilatencyPyanNet.py @@ -83,7 +83,7 @@ class MultilatencyPyanNet(Model): "hidden_size": 128, "num_layers": 2, "bidirectional": True, - "monolithic": True, + "monolithic": False, "dropout": 0.0, } LINEAR_DEFAULTS = {"hidden_size": 128, "num_layers": 2} @@ -96,6 +96,7 @@ def __init__( sample_rate: int = 16000, num_channels: int = 1, task: Optional[Task] = None, + latency_list: Optional[List[float]] = None, ): 
super().__init__(sample_rate=sample_rate, num_channels=num_channels, task=task) @@ -105,6 +106,11 @@ def __init__( lstm["batch_first"] = True linear = merge_dict(self.LINEAR_DEFAULTS, linear) self.save_hyperparameters("sincnet", "lstm", "linear") + self.hparams.latency_list = latency_list or self.task.latency_list + if self.task is not None: + self.latency_list = self.task.latency_list + else: + self.latency_list = self.hparams.latency_list self.sincnet = SincNet(**self.hparams.sincnet) @@ -112,7 +118,7 @@ def __init__( if monolithic: multi_layer_lstm = dict(lstm) del multi_layer_lstm["monolithic"] - self.lstm = nn.ModuleList([nn.LSTM(60, **multi_layer_lstm) for i in range(len(self.task.latency_list))]) + self.lstm = nn.LSTM(60, **multi_layer_lstm) else: num_layers = lstm["num_layers"] @@ -142,7 +148,8 @@ def __init__( lstm_out_features: int = self.hparams.lstm["hidden_size"] * ( 2 if self.hparams.lstm["bidirectional"] else 1 ) - self.linear = nn.ModuleList([nn.ModuleList( + + self.linear = nn.ModuleList( [ nn.Linear(in_features, out_features) for in_features, out_features in pairwise( @@ -152,8 +159,7 @@ def __init__( + [self.hparams.linear["hidden_size"]] * self.hparams.linear["num_layers"] ) - ] - ) for i in range(len(self.task.latency_list))]) + ]) def build(self): if self.hparams.linear["num_layers"] > 0: @@ -171,7 +177,7 @@ def build(self): else: out_features = len(self.specifications.classes) - self.classifier = nn.ModuleList([nn.Linear(in_features, out_features) for i in range(len(self.task.latency_list))]) + self.classifier = nn.Linear(in_features, out_features * len(self.latency_list)) self.activation = self.default_activation() def forward(self, waveforms: torch.Tensor) -> torch.Tensor: @@ -185,32 +191,28 @@ def forward(self, waveforms: torch.Tensor) -> torch.Tensor: ------- scores : (batch, frame, classes) """ - sincnet_output = self.sincnet(waveforms) - predictions = [] - for k in range(len(self.task.latency_list)): - if self.hparams.lstm["monolithic"]: - outputs, _ = self.lstm[k]( - rearrange(sincnet_output, "batch feature frame -> batch frame feature") - ) - else: - outputs = rearrange(sincnet_output, "batch feature frame -> batch frame feature") - for i, lstm in enumerate(self.lstm): - outputs, _ = lstm(outputs) - if i + 1 < self.hparams.lstm["num_layers"]: - outputs = self.dropout(outputs) - - if self.hparams.linear["num_layers"] > 0: - for linear in self.linear[k]: - outputs = F.leaky_relu(linear(outputs)) - - predictions.append(self.activation(self.classifier[k](outputs))) - predictions = torch.stack(predictions, dim=0) + outputs = self.sincnet(waveforms) + if self.hparams.lstm["monolithic"]: + outputs, _ = self.lstm( + rearrange(outputs, "batch feature frame -> batch frame feature") + ) + else: + outputs = rearrange(outputs, "batch feature frame -> batch frame feature") + for i, lstm in enumerate(self.lstm): + outputs, _ = lstm(outputs) + if i + 1 < self.hparams.lstm["num_layers"]: + outputs = self.dropout(outputs) + if self.hparams.linear["num_layers"] > 0: + for linear in self.linear: + outputs = F.leaky_relu(linear(outputs)) + predictions = self.activation(self.classifier(outputs)) + predictions = predictions.view(predictions.size(0), predictions.size(1), predictions.size(2) // len(self.latency_list), len(self.latency_list)) + predictions = predictions.permute(3, 0, 1, 2) + return predictions - - def __example_input_array(self, duration: Optional[float] = None) -> torch.Tensor: duration = duration or next(iter(self.specifications)).duration return torch.randn( From 
b2364db8af85673cfcceb64463ba5d3747411d23 Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Thu, 21 Mar 2024 16:18:43 +0100 Subject: [PATCH 14/23] make multilatency model more generic so that it can be use directly without changing the inference --- pyannote/audio/core/model.py | 2 +- .../segmentation/MultilatencyPyanNet.py | 225 ++++++++++-------- pyannote/audio/tasks/__init__.py | 4 +- ...py => multilatency_speaker_diarization.py} | 46 ++-- 4 files changed, 146 insertions(+), 131 deletions(-) rename pyannote/audio/tasks/segmentation/{multilatency_streaming_speaker_diarization.py => multilatency_speaker_diarization.py} (95%) diff --git a/pyannote/audio/core/model.py b/pyannote/audio/core/model.py index bedb7f6c4..589d10a5c 100644 --- a/pyannote/audio/core/model.py +++ b/pyannote/audio/core/model.py @@ -199,7 +199,7 @@ def __example_output( specifications: Specifications = None, ) -> Output: if specifications.resolution == Resolution.FRAME: - _, num_frames, dimension = example_output.shape + num_frames, dimension = example_output.shape[-2], example_output.shape[-1] frame_duration = specifications.duration / num_frames frames = SlidingWindow(step=frame_duration, duration=frame_duration) else: diff --git a/pyannote/audio/models/segmentation/MultilatencyPyanNet.py b/pyannote/audio/models/segmentation/MultilatencyPyanNet.py index d1584857f..359ac9f5d 100644 --- a/pyannote/audio/models/segmentation/MultilatencyPyanNet.py +++ b/pyannote/audio/models/segmentation/MultilatencyPyanNet.py @@ -82,8 +82,8 @@ class MultilatencyPyanNet(Model): LSTM_DEFAULTS = { "hidden_size": 128, "num_layers": 2, - "bidirectional": True, - "monolithic": False, + "bidirectional": False, + "monolithic": True, "dropout": 0.0, } LINEAR_DEFAULTS = {"hidden_size": 128, "num_layers": 2} @@ -95,11 +95,13 @@ def __init__( linear: dict = None, sample_rate: int = 16000, num_channels: int = 1, + latency_index: int = -1, task: Optional[Task] = None, latency_list: Optional[List[float]] = None, ): super().__init__(sample_rate=sample_rate, num_channels=num_channels, task=task) + self.latency_index = latency_index sincnet = merge_dict(self.SINCNET_DEFAULTS, sincnet) sincnet["sample_rate"] = sample_rate lstm = merge_dict(self.LSTM_DEFAULTS, lstm) @@ -113,7 +115,6 @@ def __init__( self.latency_list = self.hparams.latency_list self.sincnet = SincNet(**self.hparams.sincnet) - monolithic = lstm["monolithic"] if monolithic: multi_layer_lstm = dict(lstm) @@ -206,107 +207,123 @@ def forward(self, waveforms: torch.Tensor) -> torch.Tensor: if self.hparams.linear["num_layers"] > 0: for linear in self.linear: outputs = F.leaky_relu(linear(outputs)) - predictions = self.activation(self.classifier(outputs)) - predictions = predictions.view(predictions.size(0), predictions.size(1), predictions.size(2) // len(self.latency_list), len(self.latency_list)) - predictions = predictions.permute(3, 0, 1, 2) - - return predictions - - - def __example_input_array(self, duration: Optional[float] = None) -> torch.Tensor: - duration = duration or next(iter(self.specifications)).duration - return torch.randn( - ( - 1, - self.hparams.num_channels, - self.audio.get_num_samples(duration), - ), - device=self.device, - ) - - @property - def example_input_array(self) -> torch.Tensor: - return self.__example_input_array() - - - @cached_property - def example_output(self) -> Union[Output, Tuple[Output]]: - """Example output""" - example_input_array = self.__example_input_array() - with torch.inference_mode(): - example_output = self(example_input_array) - - def 
__example_output( - example_output: torch.Tensor, - specifications: Specifications = None, - ) -> Output: - if specifications.resolution == Resolution.FRAME: - _, _, num_frames, dimension = example_output.shape - frame_duration = specifications.duration / num_frames - frames = SlidingWindow(step=frame_duration, duration=frame_duration) - else: - _, dimension = example_output.shape - num_frames = None - frames = None - - return Output( - num_frames=num_frames, - dimension=dimension, - frames=frames, - ) - - return map_with_specifications( - self.specifications, __example_output, example_output - ) + # tensor of size (batch_size, num_frames, num_speakers * K) where K is the number of latencies + predictions = self.activation(self.classifier(outputs)) - def setup(self, stage=None): - if stage == "fit": - self.task.setup_metadata() - - # list of layers before adding task-dependent layers - before = set((name, id(module)) for name, module in self.named_modules()) - - # add task-dependent layers (e.g. final classification layer) - # and re-use original weights when compatible - - original_state_dict = self.state_dict() - self.build() + # tensor of size (batch_size, num_frames, num_speakers, K) + predictions = predictions.view(predictions.size(0), predictions.size(1), predictions.size(2) // len(self.latency_list), len(self.latency_list)) - try: - missing_keys, unexpected_keys = self.load_state_dict( - original_state_dict, strict=False - ) + # tensor of size (k, batch_size, num_frames, num_speakers) + predictions = predictions.permute(3, 0, 1, 2) - except RuntimeError as e: - if "size mismatch" in str(e): - msg = ( - "Model has been trained for a different task. For fine tuning or transfer learning, " - "it is recommended to train task-dependent layers for a few epochs " - f"before training the whole model: {self.task_dependent}." - ) - warnings.warn(msg) - else: - raise e - - # move layers that were added by build() to same device as the rest of the model - for name, module in self.named_modules(): - if (name, id(module)) not in before: - module.to(self.device) - - # add (trainable) loss function (e.g. 
ArcFace has its own set of trainable weights) - if stage == "fit": - # let task know about the model - self.task.model = self - # setup custom loss function - self.task.setup_loss_func() - # setup custom validation metrics - self.task.setup_validation_metric() - - # cache for later (and to avoid later CUDA error with multiprocessing) - _ = self.example_output - - # list of layers after adding task-dependent layers - after = set((name, id(module)) for name, module in self.named_modules()) - - # list of task-dependent layers - self.task_dependent = list(name for name, _ in after - before) \ No newline at end of file + if self.latency_index == -1: + # return all latencies + return predictions + + # return only the corresponding latency + return predictions[self.latency_index] + + + # def __example_input_array(self, duration: Optional[float] = None) -> torch.Tensor: + # duration = duration or next(iter(self.specifications)).duration + # return torch.randn( + # ( + # 1, + # self.hparams.num_channels, + # self.audio.get_num_samples(duration), + # ), + # device=self.device, + # ) + +# @property +# def example_input_array(self) -> torch.Tensor: +# return self.__example_input_array() + + +# @cached_property +# def example_output(self) -> Union[Output, Tuple[Output]]: +# """Example output""" +# example_input_array = self.__example_input_array() +# with torch.inference_mode(): +# example_output = self(example_input_array) + +# def __example_output( +# example_output: torch.Tensor, +# specifications: Specifications = None, +# ) -> Output: +# if specifications.resolution == Resolution.FRAME: +# num_frames, dimension = example_output.shape[-2], example_output.shape[-1] +# frame_duration = specifications.duration / num_frames +# frames = SlidingWindow(step=frame_duration, duration=frame_duration) +# else: +# _, dimension = example_output.shape +# num_frames = None +# frames = None + +# return Output( +# num_frames=num_frames, +# dimension=dimension, +# frames=frames, +# ) + +# return map_with_specifications( +# self.specifications, __example_output, example_output +# ) + +# def setup(self, stage=None): +# if stage == "fit": +# self.task.setup_metadata() + +# # list of layers before adding task-dependent layers +# before = set((name, id(module)) for name, module in self.named_modules()) + +# # add task-dependent layers (e.g. final classification layer) +# # and re-use original weights when compatible + +# original_state_dict = self.state_dict() +# self.build() + +# try: +# missing_keys, unexpected_keys = self.load_state_dict( +# original_state_dict, strict=False +# ) + +# except RuntimeError as e: +# if "size mismatch" in str(e): +# msg = ( +# "Model has been trained for a different task. For fine tuning or transfer learning, " +# "it is recommended to train task-dependent layers for a few epochs " +# f"before training the whole model: {self.task_dependent}." +# ) +# warnings.warn(msg) +# else: +# raise e + +# # move layers that were added by build() to same device as the rest of the model +# for name, module in self.named_modules(): +# if (name, id(module)) not in before: +# (trainable) loss function (e.g. ArcFace has its own set of trainable weights) +# if stage == "fit": +# … # list of layers after adding task-dependent layers +# after = set((name, id(module)) for name, module in self.named_modules()) + +# # list of task-dependent layers +# self.task_dependent = list(name for name, _ in after - before) module.to(self.device) + +# # add (trainable) loss function (e.g. 
ArcFace has its own set of trainable weights) +# if stage == "fit": +# # let task know about the model +# self.task.model = self +# # setup custom loss function +# self.task.setup_loss_func() +# # setup custom validation metrics +# self.task.setup_validation_metric() + +# # cache for later (and to avoid later CUDA error with multiprocessing) +# _ = self.example_output + +# # list of layers after adding task-dependent layers +# after = set((name, id(module)) for name, module in self.named_modules()) + +# # list of task-dependent layers +# self.task_dependent = list(name for name, _ in after - before) \ No newline at end of file diff --git a/pyannote/audio/tasks/__init__.py b/pyannote/audio/tasks/__init__.py index 3542e3fd1..ddce17d71 100644 --- a/pyannote/audio/tasks/__init__.py +++ b/pyannote/audio/tasks/__init__.py @@ -23,7 +23,7 @@ from .segmentation.multilabel import MultiLabelSegmentation # isort:skip from .segmentation.speaker_diarization import SpeakerDiarization # isort:skip from .segmentation.streaming_speaker_diarization import StreamingSpeakerDiarization # isort:skip -from .segmentation.multilatency_streaming_speaker_diarization import MultilatencyStreamingSpeakerDiarization # isort:skip +from .segmentation.multilatency_speaker_diarization import MultilatencySpeakerDiarization # isort:skip from .segmentation.voice_activity_detection import VoiceActivityDetection # isort:skip from .segmentation.overlapped_speech_detection import ( # isort:skip @@ -45,5 +45,5 @@ "MultiLabelSegmentation", "SpeakerEmbedding", "Segmentation", - "MultilatencyStreamingSpeakerDiarization", + "MultilatencySpeakerDiarization", ] diff --git a/pyannote/audio/tasks/segmentation/multilatency_streaming_speaker_diarization.py b/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py similarity index 95% rename from pyannote/audio/tasks/segmentation/multilatency_streaming_speaker_diarization.py rename to pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py index 208931417..3f815389c 100644 --- a/pyannote/audio/tasks/segmentation/multilatency_streaming_speaker_diarization.py +++ b/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py @@ -58,7 +58,7 @@ Scopes = list(Scope.__args__) -class MultilatencyStreamingSpeakerDiarization(SegmentationTaskMixin, Task): +class MultilatencySpeakerDiarization(SegmentationTaskMixin, Task): """Speaker diarization Parameters @@ -142,7 +142,6 @@ def __init__( metric: Union[Metric, Sequence[Metric], Dict[str, Metric]] = None, max_num_speakers: int = None, # deprecated in favor of `max_speakers_per_chunk`` loss: Literal["bce", "mse"] = None, # deprecated - latency: float = 0.0, latency_list: List[float] = [0.0], ): @@ -188,7 +187,6 @@ def __init__( self.balance = balance self.weight = weight self.vad_loss = vad_loss - self.latency=latency self.latency_list=latency_list @@ -539,6 +537,7 @@ def training_step(self, batch, batch_idx: int): predictions = self.model(waveform) seg_loss = 0 for k in range(len(self.latency_list)): + # select one latency, then everything is identical to monolatency diarization prediction = predictions[k] batch_size, num_frames, _ = prediction.shape # (batch_size, num_frames, num_classes) @@ -557,23 +556,23 @@ def training_step(self, batch, batch_idx: int): warm_up_right = round(self.warm_up[1] / self.duration * num_frames) weight[:, num_frames - warm_up_right :] = 0.0 - delay = int(np.floor(num_frames * (self.latency_list[k]) / self.duration)) # round down - - prediction = prediction[:, delay:, :] - target = 
target[:, :num_frames-delay, :] - - #future - # prediction = prediction[:, :num_frames-delay, :] - # target = target[:, delay:, :] - - + # shift predictions and targets + if self.latency_list[k] >= 0: + delay = int(np.floor(num_frames * (self.latency_list[k]) / self.duration)) # round down + prediction = prediction[:, delay:, :] + reference = target[:, :num_frames-delay, :] + else: + delay = int(np.floor(num_frames * (-1 * self.latency_list[k]) / self.duration)) # round down + prediction = prediction[:, :num_frames-delay, :] + reference = target[:, delay:, :] if self.specifications.powerset: multilabel = self.model.powerset.to_multilabel(prediction) - permutated_target, _ = permutate(multilabel, target) + permutated_target, _ = permutate(multilabel, reference) permutated_target_powerset = self.model.powerset.to_powerset( permutated_target.float() ) + # add all losses seg_loss += self.segmentation_loss( prediction, permutated_target_powerset, weight=weight ) @@ -701,14 +700,14 @@ def validation_step(self, batch, batch_idx: int): warm_up_right = round(self.warm_up[1] / self.duration * num_frames) weight[:, num_frames - warm_up_right :] = 0.0 - delay = int(np.floor(num_frames * (self.latency_list[k]) / self.duration)) # round down - - prediction = prediction[:, delay:, :] - reference = target[:, :num_frames-delay, :] - - #future - # prediction = prediction[:, :num_frames-delay, :] - # target = target[:, delay:, :] + if self.latency_list[k] >= 0: + delay = int(np.floor(num_frames * (self.latency_list[k]) / self.duration)) # round down + prediction = prediction[:, delay:, :] + reference = target[:, :num_frames-delay, :] + else: + delay = int(np.floor(num_frames * (-1*self.latency_list[k]) / self.duration)) # round down + prediction = prediction[:, :num_frames-delay, :] + reference = target[:, delay:, :] if self.specifications.powerset: multilabel = self.model.powerset.to_multilabel(prediction) @@ -729,9 +728,8 @@ def validation_step(self, batch, batch_idx: int): permutated_prediction, reference, weight=weight )) - target = target[:, :num_frames-delay, :] - + multilabel = self.model.powerset.to_multilabel(predictions[0]) seg_loss = torch.sum(torch.tensor(losses)) self.model.log( From 770ac8a086fa76f19304ace7baf4b515bab7f2fd Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Mon, 25 Mar 2024 16:24:04 +0100 Subject: [PATCH 15/23] add a 'streaming' flag to SincNet --- pyannote/audio/models/blocks/sincnet.py | 53 +++++++++++++++++-------- 1 file changed, 36 insertions(+), 17 deletions(-) diff --git a/pyannote/audio/models/blocks/sincnet.py b/pyannote/audio/models/blocks/sincnet.py index 65bd6e57f..786d7eab8 100644 --- a/pyannote/audio/models/blocks/sincnet.py +++ b/pyannote/audio/models/blocks/sincnet.py @@ -31,7 +31,7 @@ class SincNet(nn.Module): - def __init__(self, sample_rate: int = 16000, stride: int = 1): + def __init__(self, sample_rate: int = 16000, stride: int = 1, streaming: bool = False): super().__init__() if sample_rate != 16000: @@ -40,12 +40,14 @@ def __init__(self, sample_rate: int = 16000, stride: int = 1): # kernel_size by (sample_rate / 16000). but this needs to be double-checked. 
self.stride = stride - - self.wav_norm1d = nn.InstanceNorm1d(1, affine=True) + self.streaming = streaming + if self.streaming == False: + self.wav_norm1d = nn.InstanceNorm1d(1, affine=True) self.conv1d = nn.ModuleList() self.pool1d = nn.ModuleList() - self.norm1d = nn.ModuleList() + if self.streaming == False: + self.norm1d = nn.ModuleList() self.conv1d.append( Encoder( @@ -60,15 +62,18 @@ def __init__(self, sample_rate: int = 16000, stride: int = 1): ) ) self.pool1d.append(nn.MaxPool1d(3, stride=3, padding=0, dilation=1)) - self.norm1d.append(nn.InstanceNorm1d(80, affine=True)) + if self.streaming == False: + self.norm1d.append(nn.InstanceNorm1d(80, affine=True)) self.conv1d.append(nn.Conv1d(80, 60, 5, stride=1)) self.pool1d.append(nn.MaxPool1d(3, stride=3, padding=0, dilation=1)) - self.norm1d.append(nn.InstanceNorm1d(60, affine=True)) + if self.streaming == False: + self.norm1d.append(nn.InstanceNorm1d(60, affine=True)) self.conv1d.append(nn.Conv1d(60, 60, 5, stride=1)) self.pool1d.append(nn.MaxPool1d(3, stride=3, padding=0, dilation=1)) - self.norm1d.append(nn.InstanceNorm1d(60, affine=True)) + if self.streaming == False: + self.norm1d.append(nn.InstanceNorm1d(60, affine=True)) def forward(self, waveforms: torch.Tensor) -> torch.Tensor: """Pass forward @@ -77,19 +82,33 @@ def forward(self, waveforms: torch.Tensor) -> torch.Tensor: ---------- waveforms : (batch, channel, sample) """ + if self.streaming == False: + outputs = self.wav_norm1d(waveforms) + for c, (conv1d, pool1d, norm1d) in enumerate( + zip(self.conv1d, self.pool1d, self.norm1d) + ): + + outputs = conv1d(outputs) + + # https://github.com/mravanelli/SincNet/issues/4 + if c == 0: + outputs = torch.abs(outputs) - outputs = self.wav_norm1d(waveforms) + outputs = F.leaky_relu(norm1d(pool1d(outputs))) + return outputs - for c, (conv1d, pool1d, norm1d) in enumerate( - zip(self.conv1d, self.pool1d, self.norm1d) - ): + else: + outputs = waveforms + for c, (conv1d, pool1d) in enumerate( + zip(self.conv1d, self.pool1d) + ): - outputs = conv1d(outputs) + outputs = conv1d(outputs) - # https://github.com/mravanelli/SincNet/issues/4 - if c == 0: - outputs = torch.abs(outputs) + # https://github.com/mravanelli/SincNet/issues/4 + if c == 0: + outputs = torch.abs(outputs) - outputs = F.leaky_relu(norm1d(pool1d(outputs))) + outputs = F.leaky_relu(pool1d(outputs)) - return outputs + return outputs From 542d104c0ebbbd6377e17ba160b7d4e7ff2c9334 Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Mon, 25 Mar 2024 16:28:45 +0100 Subject: [PATCH 16/23] change the MultilatencyPyanNet output structure from (num_latencies, batch_size, num_frames, num_classes) to (batch_size, num_frames, num_classes x num_latencies) --- .../segmentation/MultilatencyPyanNet.py | 15 ++-- .../multilatency_speaker_diarization.py | 69 ++++++++----------- 2 files changed, 37 insertions(+), 47 deletions(-) diff --git a/pyannote/audio/models/segmentation/MultilatencyPyanNet.py b/pyannote/audio/models/segmentation/MultilatencyPyanNet.py index 359ac9f5d..316216a47 100644 --- a/pyannote/audio/models/segmentation/MultilatencyPyanNet.py +++ b/pyannote/audio/models/segmentation/MultilatencyPyanNet.py @@ -114,7 +114,7 @@ def __init__( else: self.latency_list = self.hparams.latency_list - self.sincnet = SincNet(**self.hparams.sincnet) + self.sincnet = SincNet(**self.hparams.sincnet, streaming=True) monolithic = lstm["monolithic"] if monolithic: multi_layer_lstm = dict(lstm) @@ -209,19 +209,20 @@ def forward(self, waveforms: torch.Tensor) -> torch.Tensor: outputs = 
F.leaky_relu(linear(outputs)) # tensor of size (batch_size, num_frames, num_speakers * K) where K is the number of latencies predictions = self.activation(self.classifier(outputs)) + num_classes_powerset = predictions.size(2) //len(self.latency_list) + # # tensor of size (batch_size, num_frames, num_speakers, K) + # predictions = predictions.view(predictions.size(0), predictions.size(1), predictions.size(2) // len(self.latency_list), len(self.latency_list)) - # tensor of size (batch_size, num_frames, num_speakers, K) - predictions = predictions.view(predictions.size(0), predictions.size(1), predictions.size(2) // len(self.latency_list), len(self.latency_list)) - - # tensor of size (k, batch_size, num_frames, num_speakers) - predictions = predictions.permute(3, 0, 1, 2) + # # tensor of size (k, batch_size, num_frames, num_speakers) + # predictions = predictions.permute(3, 0, 1, 2) if self.latency_index == -1: # return all latencies return predictions # return only the corresponding latency - return predictions[self.latency_index] + return predictions[:,:,self.latency_index*num_classes_powerset:self.latency_index*num_classes_powerset+num_classes_powerset] + # def __example_input_array(self, duration: Optional[float] = None) -> torch.Tensor: diff --git a/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py b/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py index 3f815389c..25c004c34 100644 --- a/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py +++ b/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py @@ -60,7 +60,6 @@ class MultilatencySpeakerDiarization(SegmentationTaskMixin, Task): """Speaker diarization - Parameters ---------- protocol : SpeakerDiarizationProtocol @@ -110,18 +109,15 @@ class MultilatencySpeakerDiarization(SegmentationTaskMixin, Task): metric : optional Validation metric(s). Can be anything supported by torchmetrics.MetricCollection. Defaults to AUROC (area under the ROC curve). - References ---------- Hervé Bredin and Antoine Laurent "End-To-End Speaker Segmentation for Overlap-Aware Resegmentation." Proc. Interspeech 2021 - Zhihao Du, Shiliang Zhang, Siqi Zheng, and Zhijie Yan "Speaker Embedding-aware Neural Diarization: an Efficient Framework for Overlapping Speech Diarization in Meeting Scenarios" https://arxiv.org/abs/2203.09767 - """ def __init__( @@ -142,6 +138,7 @@ def __init__( metric: Union[Metric, Sequence[Metric], Dict[str, Metric]] = None, max_num_speakers: int = None, # deprecated in favor of `max_speakers_per_chunk`` loss: Literal["bce", "mse"] = None, # deprecated + latency: float = 0.0, latency_list: List[float] = [0.0], ): @@ -187,6 +184,7 @@ def __init__( self.balance = balance self.weight = weight self.vad_loss = vad_loss + self.latency=latency self.latency_list=latency_list @@ -296,7 +294,6 @@ def setup_loss_func(self): def prepare_chunk(self, file_id: int, start_time: float, duration: float): """Prepare chunk - Parameters ---------- file_id : int @@ -305,7 +302,6 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float): Chunk start time duration : float Chunk duration. - Returns ------- sample : dict @@ -376,13 +372,11 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float): def collate_y(self, batch) -> torch.Tensor: """ - Parameters ---------- batch : list List of samples to collate. "y" field is expected to be a SlidingWindowFeature. 
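As an aside, a minimal sketch of the flattened output layout this patch relies on, in which each latency occupies a contiguous block of `num_classes_powerset` columns of the classifier output (the sizes used here are arbitrary illustrations, not values taken from the code):

    import torch

    batch_size, num_frames, C, K = 2, 100, 7, 4   # C: powerset classes, K: number of latencies
    predictions = torch.randn(batch_size, num_frames, C * K)  # shape produced by the classifier

    k = 1  # select one latency, as forward() and training_step() do
    prediction_k = predictions[:, :, k * C:(k + 1) * C]
    assert prediction_k.shape == (batch_size, num_frames, C)
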
- Returns ------- y : torch.Tensor @@ -427,7 +421,6 @@ def segmentation_loss( weight: torch.Tensor = None, ) -> torch.Tensor: """Permutation-invariant segmentation loss - Parameters ---------- permutated_prediction : (batch_size, num_frames, num_classes) torch.Tensor @@ -436,7 +429,6 @@ def segmentation_loss( Speaker activity. weight : (batch_size, num_frames, 1) torch.Tensor, optional Frames weight. - Returns ------- seg_loss : torch.Tensor @@ -470,7 +462,6 @@ def voice_activity_detection_loss( weight: torch.Tensor = None, ) -> torch.Tensor: """Voice activity detection loss - Parameters ---------- permutated_prediction : (batch_size, num_frames, num_classes) torch.Tensor @@ -479,7 +470,6 @@ def voice_activity_detection_loss( Speaker activity. weight : (batch_size, num_frames, 1) torch.Tensor, optional Frames weight. - Returns ------- vad_loss : torch.Tensor @@ -502,14 +492,12 @@ def voice_activity_detection_loss( def training_step(self, batch, batch_idx: int): """Compute permutation-invariant segmentation loss - Parameters ---------- batch : (usually) dict of torch.Tensor Current batch. batch_idx: int Batch index. - Returns ------- loss : {str: torch.tensor} @@ -535,10 +523,10 @@ def training_step(self, batch, batch_idx: int): # forward pass predictions = self.model(waveform) + num_classes_powerset = predictions.size(2) //len(self.latency_list) seg_loss = 0 for k in range(len(self.latency_list)): - # select one latency, then everything is identical to monolatency diarization - prediction = predictions[k] + prediction = predictions[:,:,k*num_classes_powerset:k*num_classes_powerset+num_classes_powerset] batch_size, num_frames, _ = prediction.shape # (batch_size, num_frames, num_classes) @@ -556,23 +544,23 @@ def training_step(self, batch, batch_idx: int): warm_up_right = round(self.warm_up[1] / self.duration * num_frames) weight[:, num_frames - warm_up_right :] = 0.0 - # shift predictions and targets - if self.latency_list[k] >= 0: - delay = int(np.floor(num_frames * (self.latency_list[k]) / self.duration)) # round down - prediction = prediction[:, delay:, :] - reference = target[:, :num_frames-delay, :] - else: - delay = int(np.floor(num_frames * (-1 * self.latency_list[k]) / self.duration)) # round down - prediction = prediction[:, :num_frames-delay, :] - reference = target[:, delay:, :] + delay = int(np.floor(num_frames * (self.latency_list[k]) / self.duration)) # round down + + prediction = prediction[:, delay:, :] + target = target[:, :num_frames-delay, :] + + #future + # prediction = prediction[:, :num_frames-delay, :] + # target = target[:, delay:, :] + + if self.specifications.powerset: multilabel = self.model.powerset.to_multilabel(prediction) - permutated_target, _ = permutate(multilabel, reference) + permutated_target, _ = permutate(multilabel, target) permutated_target_powerset = self.model.powerset.to_powerset( permutated_target.float() ) - # add all losses seg_loss += self.segmentation_loss( prediction, permutated_target_powerset, weight=weight ) @@ -582,7 +570,7 @@ def training_step(self, batch, batch_idx: int): seg_loss += self.segmentation_loss( permutated_prediction, target, weight=weight ) - + self.model.log( "loss/train/segmentation", @@ -659,7 +647,6 @@ def default_metric( # TODO: no need to compute gradient in this method def validation_step(self, batch, batch_idx: int): """Compute validation loss and metric - Parameters ---------- batch : dict of torch.Tensor @@ -682,8 +669,9 @@ def validation_step(self, batch, batch_idx: int): # forward pass predictions = 
self.model(waveform) losses=[] + num_classes_powerset = predictions.size(2) //len(self.latency_list) for k in range(len(self.latency_list)): - prediction = predictions[k] + prediction = predictions[:,:,k*num_classes_powerset:k*num_classes_powerset+num_classes_powerset] batch_size, num_frames, _ = prediction.shape # frames weight @@ -700,14 +688,14 @@ def validation_step(self, batch, batch_idx: int): warm_up_right = round(self.warm_up[1] / self.duration * num_frames) weight[:, num_frames - warm_up_right :] = 0.0 - if self.latency_list[k] >= 0: - delay = int(np.floor(num_frames * (self.latency_list[k]) / self.duration)) # round down - prediction = prediction[:, delay:, :] - reference = target[:, :num_frames-delay, :] - else: - delay = int(np.floor(num_frames * (-1*self.latency_list[k]) / self.duration)) # round down - prediction = prediction[:, :num_frames-delay, :] - reference = target[:, delay:, :] + delay = int(np.floor(num_frames * (self.latency_list[k]) / self.duration)) # round down + + prediction = prediction[:, delay:, :] + reference = target[:, :num_frames-delay, :] + + #future + # prediction = prediction[:, :num_frames-delay, :] + # target = target[:, delay:, :] if self.specifications.powerset: multilabel = self.model.powerset.to_multilabel(prediction) @@ -728,8 +716,9 @@ def validation_step(self, batch, batch_idx: int): permutated_prediction, reference, weight=weight )) + target = target[:, :num_frames-delay, :] + - multilabel = self.model.powerset.to_multilabel(predictions[0]) seg_loss = torch.sum(torch.tensor(losses)) self.model.log( @@ -916,4 +905,4 @@ def progress_hook(completed: int = None, total: int = None): if __name__ == "__main__": import typer - typer.run(main) + typer.run(main) \ No newline at end of file From e90bfb43879c332ed01bedda12a7f63dc40cfe52 Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Mon, 25 Mar 2024 16:35:02 +0100 Subject: [PATCH 17/23] add a StreamingInference class that concatenate the end of chunks instead of aggregating them --- pyannote/audio/__init__.py | 4 +- pyannote/audio/core/guided_inference.py | 847 ------------------ pyannote/audio/core/inference.py | 86 +- pyannote/audio/core/model.py | 2 +- .../segmentation/MultilatencyPyanNet.py | 108 +-- 5 files changed, 5 insertions(+), 1042 deletions(-) delete mode 100644 pyannote/audio/core/guided_inference.py diff --git a/pyannote/audio/__init__.py b/pyannote/audio/__init__.py index 004f5da32..827cb1473 100644 --- a/pyannote/audio/__init__.py +++ b/pyannote/audio/__init__.py @@ -27,9 +27,9 @@ from .core.inference import Inference -from .core.guided_inference import GuidedInference +from .core.streaming_inference import StreamingInference from .core.io import Audio from .core.model import Model from .core.pipeline import Pipeline -__all__ = ["Audio", "Model", "Inference", "Pipeline", "GuidedInference"] +__all__ = ["Audio", "Model", "Inference", "Pipeline", "StreamingInference"] diff --git a/pyannote/audio/core/guided_inference.py b/pyannote/audio/core/guided_inference.py deleted file mode 100644 index df7ec9899..000000000 --- a/pyannote/audio/core/guided_inference.py +++ /dev/null @@ -1,847 +0,0 @@ -# MIT License -# -# Copyright (c) 2020- CNRS -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit 
persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -import math -import warnings -from pathlib import Path -from typing import Callable, List, Optional, Text, Tuple, Union - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from einops import rearrange -from pyannote.core import Segment, SlidingWindow, SlidingWindowFeature -from pytorch_lightning.utilities.memory import is_oom_error - -from pyannote.audio.core.io import AudioFile -from pyannote.audio.core.model import Model, Specifications -from pyannote.audio.core.task import Resolution -from pyannote.audio.utils.multi_task import map_with_specifications -from pyannote.audio.utils.permutation import mae_cost_func, permutate -from pyannote.audio.utils.powerset import Powerset -from pyannote.audio.utils.reproducibility import fix_reproducibility - - -class BaseInference: - pass - - -class GuidedInference(BaseInference): - """Inference - - Parameters - ---------- - model : Model - Model. Will be automatically set to eval() mode and moved to `device` when provided. - window : {"sliding", "whole"}, optional - Use a "sliding" window and aggregate the corresponding outputs (default) - or just one (potentially long) window covering the "whole" file or chunk. - duration : float, optional - Chunk duration, in seconds. Defaults to duration used for training the model. - Has no effect when `window` is "whole". - step : float, optional - Step between consecutive chunks, in seconds. Defaults to warm-up duration when - greater than 0s, otherwise 10% of duration. Has no effect when `window` is "whole". - pre_aggregation_hook : callable, optional - When a callable is provided, it is applied to the model output, just before aggregation. - Takes a (num_chunks, num_frames, dimension) numpy array as input and returns a modified - (num_chunks, num_frames, other_dimension) numpy array passed to overlap-add aggregation. - skip_aggregation : bool, optional - Do not aggregate outputs when using "sliding" window. Defaults to False. - skip_conversion: bool, optional - In case a task has been trained with `powerset` mode, output is automatically - converted to `multi-label`, unless `skip_conversion` is set to True. - batch_size : int, optional - Batch size. Larger values (should) make inference faster. Defaults to 32. - device : torch.device, optional - Device used for inference. Defaults to `model.device`. - In case `device` and `model.device` are different, model is sent to device. 
- use_auth_token : str, optional - When loading a private huggingface.co model, set `use_auth_token` - to True or to a string containing your hugginface.co authentication - token that can be obtained by running `huggingface-cli login` - """ - - def __init__( - self, - model: Union[Model, Text, Path], - window: Text = "sliding", - duration: float = None, - step: float = None, - pre_aggregation_hook: Callable[[np.ndarray], np.ndarray] = None, - skip_aggregation: bool = False, - skip_conversion: bool = False, - device: torch.device = None, - batch_size: int = 32, - use_auth_token: Union[Text, None] = None, - ): - # ~~~~ model ~~~~~ - - self.model = ( - model - if isinstance(model, Model) - else Model.from_pretrained( - model, - map_location=device, - strict=False, - use_auth_token=use_auth_token, - ) - ) - - if device is None: - device = self.model.device - self.device = device - - self.model.eval() - self.model.to(self.device) - - specifications = self.model.specifications - - # ~~~~ sliding window ~~~~~ - - if window not in ["sliding", "whole"]: - raise ValueError('`window` must be "sliding" or "whole".') - - if window == "whole" and any( - s.resolution == Resolution.FRAME for s in specifications - ): - warnings.warn( - 'Using "whole" `window` inference with a frame-based model might lead to bad results ' - 'and huge memory consumption: it is recommended to set `window` to "sliding".' - ) - self.window = window - - training_duration = next(iter(specifications)).duration - duration = duration or training_duration - if training_duration != duration: - warnings.warn( - f"Model was trained with {training_duration:g}s chunks, and you requested " - f"{duration:g}s chunks for inference: this might lead to suboptimal results." - ) - self.duration = duration - - # ~~~~ powerset to multilabel conversion ~~~~ - - self.skip_conversion = skip_conversion - - conversion = list() - for s in specifications: - if s.powerset and not skip_conversion: - c = Powerset(len(s.classes), s.powerset_max_classes) - else: - c = nn.Identity() - conversion.append(c.to(self.device)) - - if isinstance(specifications, Specifications): - self.conversion = conversion[0] - else: - self.conversion = nn.ModuleList(conversion) - - # ~~~~ overlap-add aggregation ~~~~~ - - self.skip_aggregation = skip_aggregation - self.pre_aggregation_hook = pre_aggregation_hook - - self.warm_up = next(iter(specifications)).warm_up - # Use that many seconds on the left- and rightmost parts of each chunk - # to warm up the model. While the model does process those left- and right-most - # parts, only the remaining central part of each chunk is used for aggregating - # scores during inference. - - # step between consecutive chunks - step = step or ( - 0.1 * self.duration if self.warm_up[0] == 0.0 else self.warm_up[0] - ) - - if step > self.duration: - raise ValueError( - f"Step between consecutive chunks is set to {step:g}s, while chunks are " - f"only {self.duration:g}s long, leading to gaps between consecutive chunks. " - f"Either decrease step or increase duration." 
- ) - self.step = step - - self.batch_size = batch_size - - def to(self, device: torch.device) -> "Inference": - """Send internal model to `device`""" - - if not isinstance(device, torch.device): - raise TypeError( - f"`device` must be an instance of `torch.device`, got `{type(device).__name__}`" - ) - - self.model.to(device) - self.conversion.to(device) - self.device = device - return self - - def infer(self, chunks: torch.Tensor) -> Union[np.ndarray, Tuple[np.ndarray]]: - """Forward pass - - Takes care of sending chunks to right device and outputs back to CPU - - Parameters - ---------- - chunks : (batch_size, num_channels, num_samples) torch.Tensor - Batch of audio chunks. - - Returns - ------- - outputs : (tuple of) (batch_size, ...) np.ndarray - Model output. - """ - - with torch.inference_mode(): - try: - outputs = self.model(chunks.to(self.device)) - except RuntimeError as exception: - if is_oom_error(exception): - raise MemoryError( - f"batch_size ({self.batch_size: d}) is probably too large. " - f"Try with a smaller value until memory error disappears." - ) - else: - raise exception - - def __convert(output: torch.Tensor, conversion: nn.Module, **kwargs): - return conversion(output).cpu().numpy() - - return map_with_specifications( - self.model.specifications, __convert, outputs, self.conversion - ) - - def slide( - self, - waveform: torch.Tensor, - sample_rate: int, - hook: Optional[Callable], - ) -> Union[SlidingWindowFeature, Tuple[SlidingWindowFeature]]: - """Slide model on a waveform - - Parameters - ---------- - waveform: (num_channels, num_samples) torch.Tensor - Waveform. - sample_rate : int - Sample rate. - hook: Optional[Callable] - When a callable is provided, it is called everytime a batch is - processed with two keyword arguments: - - `completed`: the number of chunks that have been processed so far - - `total`: the total number of chunks - - Returns - ------- - output : (tuple of) SlidingWindowFeature - Model output. Shape is (num_chunks, dimension) for chunk-level tasks, - and (num_frames, dimension) for frame-level tasks. 
- """ - step_frames = self.model.example_output.frames.closest_frame(self.step) - window_size: int = self.model.audio.get_num_samples(self.duration) - step_size: int = round(self.step * sample_rate) - _, num_samples = waveform.shape - - def __frames( - example_output, specifications: Optional[Specifications] = None - ) -> SlidingWindow: - if specifications.resolution == Resolution.CHUNK: - return SlidingWindow(start=0.0, duration=self.duration, step=self.step) - return example_output.frames - - frames: Union[SlidingWindow, Tuple[SlidingWindow]] = map_with_specifications( - self.model.specifications, - __frames, - self.model.example_output, - ) - - # prepare complete chunks - if num_samples >= window_size: - chunks: torch.Tensor = rearrange( - waveform.unfold(1, window_size, step_size), - "channel chunk frame -> chunk channel frame", - ) - num_chunks, _, _ = chunks.shape - else: - num_chunks = 0 - - # prepare last incomplete chunk - has_last_chunk = (num_samples < window_size) or ( - num_samples - window_size - ) % step_size > 0 - if has_last_chunk: - # pad last chunk with zeros - last_chunk: torch.Tensor = waveform[:, num_chunks * step_size :] - _, last_window_size = last_chunk.shape - last_pad = window_size - last_window_size - last_chunk = F.pad(last_chunk, (0, last_pad)) - - def __empty_list(**kwargs): - return list() - - outputs: Union[ - List[np.ndarray], Tuple[List[np.ndarray]] - ] = map_with_specifications(self.model.specifications, __empty_list) - - if hook is not None: - hook(completed=0, total=num_chunks + has_last_chunk) - - def __append_batch(output, batch_output, **kwargs) -> None: - output.append(batch_output) - return - - # slide over audio chunks in batch - _ = map_with_specifications( - self.model.specifications, __append_batch, outputs, self.model(chunks[0].to(self.device)).detach().cpu().numpy() - ) - - num_frames = outputs[0].shape[1] - num_speakers = outputs[0].shape[2] - guide = torch.log(torch.full((1,num_frames,num_speakers), fill_value=1/num_speakers, device=self.device)) - for c in np.arange(1, num_chunks): - guide[:,:num_frames-2*step_frames] = torch.from_numpy(outputs[c-1][:,step_frames:num_frames-step_frames]) - batch_outputs: Union[np.ndarray, Tuple[np.ndarray]] = self.model(chunks[c].to(self.device), guide) - - _ = map_with_specifications( - self.model.specifications, __append_batch, outputs, batch_outputs.detach().cpu().numpy() - ) - - if hook is not None: - hook(completed=c + self.batch_size, total=num_chunks + has_last_chunk) - - # process orphan last chunk - if has_last_chunk: - guide[:,:num_frames-2*step_frames] = torch.from_numpy(outputs[c-1][:,step_frames:num_frames-step_frames]) - last_outputs: Union[np.ndarray, Tuple[np.ndarray]] = self.model(last_chunk[None].to(self.device), guide) - - _ = map_with_specifications( - self.model.specifications, __append_batch, outputs, last_outputs.detach().cpu().numpy() - ) - - if hook is not None: - hook( - completed=num_chunks + has_last_chunk, - total=num_chunks + has_last_chunk, - ) - - def __vstack(output: List[np.ndarray], **kwargs) -> np.ndarray: - return np.vstack(output) - - outputs: Union[np.ndarray, Tuple[np.ndarray]] = map_with_specifications( - self.model.specifications, __vstack, outputs - ) - - powerset = Powerset(len(self.model.specifications.classes),num_speakers) - outputs = powerset.to_multilabel(torch.from_numpy(outputs)) - outputs = outputs.detach().cpu().numpy() - - def __aggregate( - outputs: np.ndarray, - frames: SlidingWindow, - specifications: Optional[Specifications] = None, - ) -> 
SlidingWindowFeature: - # skip aggregation when requested, - # or when model outputs just one vector per chunk - # or when model is permutation-invariant (and not post-processed) - if ( - self.skip_aggregation - or specifications.resolution == Resolution.CHUNK - or ( - specifications.permutation_invariant - and self.pre_aggregation_hook is None - ) - ): - frames = SlidingWindow( - start=0.0, duration=self.duration, step=self.step - ) - return SlidingWindowFeature(outputs, frames) - - if self.pre_aggregation_hook is not None: - outputs = self.pre_aggregation_hook(outputs) - - aggregated = self.aggregate( - SlidingWindowFeature( - outputs, - SlidingWindow(start=0.0, duration=self.duration, step=self.step), - ), - frames=frames, - warm_up=self.warm_up, - hamming=True, - missing=0.0, - ) - - # remove padding that was added to last chunk - if has_last_chunk: - aggregated.data = aggregated.crop( - Segment(0.0, num_samples / sample_rate), mode="loose" - ) - - return aggregated - - return map_with_specifications( - self.model.specifications, __aggregate, outputs, frames - ) - - def __call__( - self, file: AudioFile, hook: Optional[Callable] = None - ) -> Union[ - Tuple[Union[SlidingWindowFeature, np.ndarray]], - Union[SlidingWindowFeature, np.ndarray], - ]: - """Run inference on a whole file - - Parameters - ---------- - file : AudioFile - Audio file. - hook : callable, optional - When a callable is provided, it is called everytime a batch is processed - with two keyword arguments: - - `completed`: the number of chunks that have been processed so far - - `total`: the total number of chunks - - Returns - ------- - output : (tuple of) SlidingWindowFeature or np.ndarray - Model output, as `SlidingWindowFeature` if `window` is set to "sliding" - and `np.ndarray` if is set to "whole". - - """ - - fix_reproducibility(self.device) - - waveform, sample_rate = self.model.audio(file) - - if self.window == "sliding": - return self.slide(waveform, sample_rate, hook=hook) - - outputs: Union[np.ndarray, Tuple[np.ndarray]] = self.infer(waveform[None]) - - def __first_sample(outputs: np.ndarray, **kwargs) -> np.ndarray: - return outputs[0] - - return map_with_specifications( - self.model.specifications, __first_sample, outputs - ) - - def crop( - self, - file: AudioFile, - chunk: Union[Segment, List[Segment]], - duration: Optional[float] = None, - hook: Optional[Callable] = None, - ) -> Union[ - Tuple[Union[SlidingWindowFeature, np.ndarray]], - Union[SlidingWindowFeature, np.ndarray], - ]: - """Run inference on a chunk or a list of chunks - - Parameters - ---------- - file : AudioFile - Audio file. - chunk : Segment or list of Segment - Apply model on this chunk. When a list of chunks is provided and - window is set to "sliding", this is equivalent to calling crop on - the smallest chunk that contains all chunks. In case window is set - to "whole", this is equivalent to concatenating each chunk into one - (artifical) chunk before processing it. - duration : float, optional - Enforce chunk duration (in seconds). This is a hack to avoid rounding - errors that may result in a different number of audio samples for two - chunks of the same duration. 
- hook : callable, optional - When a callable is provided, it is called everytime a batch is processed - with two keyword arguments: - - `completed`: the number of chunks that have been processed so far - - `total`: the total number of chunks - - Returns - ------- - output : (tuple of) SlidingWindowFeature or np.ndarray - Model output, as `SlidingWindowFeature` if `window` is set to "sliding" - and `np.ndarray` if is set to "whole". - - Notes - ----- - If model needs to be warmed up, remember to extend the requested chunk with the - corresponding amount of time so that it is actually warmed up when processing the - chunk of interest: - >>> chunk_of_interest = Segment(10, 15) - >>> extended_chunk = Segment(10 - warm_up, 15 + warm_up) - >>> inference.crop(file, extended_chunk).crop(chunk_of_interest, returns_data=False) - """ - - fix_reproducibility(self.device) - - if self.window == "sliding": - if not isinstance(chunk, Segment): - start = min(c.start for c in chunk) - end = max(c.end for c in chunk) - chunk = Segment(start=start, end=end) - - waveform, sample_rate = self.model.audio.crop( - file, chunk, duration=duration - ) - outputs: Union[ - SlidingWindowFeature, Tuple[SlidingWindowFeature] - ] = self.slide(waveform, sample_rate, hook=hook) - - def __shift(output: SlidingWindowFeature, **kwargs) -> SlidingWindowFeature: - frames = output.sliding_window - shifted_frames = SlidingWindow( - start=chunk.start, duration=frames.duration, step=frames.step - ) - return SlidingWindowFeature(output.data, shifted_frames) - - return map_with_specifications(self.model.specifications, __shift, outputs) - - if isinstance(chunk, Segment): - waveform, sample_rate = self.model.audio.crop( - file, chunk, duration=duration - ) - else: - waveform = torch.cat( - [self.model.audio.crop(file, c)[0] for c in chunk], dim=1 - ) - - outputs: Union[np.ndarray, Tuple[np.ndarray]] = self.infer(waveform[None]) - - def __first_sample(outputs: np.ndarray, **kwargs) -> np.ndarray: - return outputs[0] - - return map_with_specifications( - self.model.specifications, __first_sample, outputs - ) - - @staticmethod - def aggregate( - scores: SlidingWindowFeature, - frames: SlidingWindow = None, - warm_up: Tuple[float, float] = (0.0, 0.0), - epsilon: float = 1e-12, - hamming: bool = False, - missing: float = np.NaN, - skip_average: bool = False, - ) -> SlidingWindowFeature: - """Aggregation - - Parameters - ---------- - scores : SlidingWindowFeature - Raw (unaggregated) scores. Shape is (num_chunks, num_frames_per_chunk, num_classes). - frames : SlidingWindow, optional - Frames resolution. Defaults to estimate it automatically based on `scores` shape - and chunk size. Providing the exact frame resolution (when known) leads to better - temporal precision. - warm_up : (float, float) tuple, optional - Left/right warm up duration (in seconds). - missing : float, optional - Value used to replace missing (ie all NaNs) values. - skip_average : bool, optional - Skip final averaging step. - - Returns - ------- - aggregated_scores : SlidingWindowFeature - Aggregated scores. 
Shape is (num_frames, num_classes) - """ - - num_chunks, num_frames_per_chunk, num_classes = scores.data.shape - - chunks = scores.sliding_window - if frames is None: - duration = step = chunks.duration / num_frames_per_chunk - frames = SlidingWindow(start=chunks.start, duration=duration, step=step) - else: - frames = SlidingWindow( - start=chunks.start, - duration=frames.duration, - step=frames.step, - ) - - masks = 1 - np.isnan(scores) - scores.data = np.nan_to_num(scores.data, copy=True, nan=0.0) - - # Hamming window used for overlap-add aggregation - hamming_window = ( - np.hamming(num_frames_per_chunk).reshape(-1, 1) - if hamming - else np.ones((num_frames_per_chunk, 1)) - ) - - # anything before warm_up_left (and after num_frames_per_chunk - warm_up_right) - # will not be used in the final aggregation - - # warm-up windows used for overlap-add aggregation - warm_up_window = np.ones((num_frames_per_chunk, 1)) - # anything before warm_up_left will not contribute to aggregation - warm_up_left = round( - warm_up[0] / scores.sliding_window.duration * num_frames_per_chunk - ) - warm_up_window[:warm_up_left] = epsilon - # anything after num_frames_per_chunk - warm_up_right either - warm_up_right = round( - warm_up[1] / scores.sliding_window.duration * num_frames_per_chunk - ) - warm_up_window[num_frames_per_chunk - warm_up_right :] = epsilon - - # aggregated_output[i] will be used to store the sum of all predictions - # for frame #i - num_frames = ( - frames.closest_frame( - scores.sliding_window.start - + scores.sliding_window.duration - + (num_chunks - 1) * scores.sliding_window.step - ) - + 1 - ) - - print("inferencetot",num_frames) - - aggregated_output: np.ndarray = np.zeros( - (num_frames, num_classes), dtype=np.float32 - ) - - # overlapping_chunk_count[i] will be used to store the number of chunks - # that contributed to frame #i - overlapping_chunk_count: np.ndarray = np.zeros( - (num_frames, num_classes), dtype=np.float32 - ) - - # aggregated_mask[i] will be used to indicate whether - # at least one non-NAN frame contributed to frame #i - aggregated_mask: np.ndarray = np.zeros( - (num_frames, num_classes), dtype=np.float32 - ) - # loop on the scores of sliding chunks - for (chunk, score), (_, mask) in zip(scores, masks): - # chunk ~ Segment - # score ~ (num_frames_per_chunk, num_classes)-shaped np.ndarray - # mask ~ (num_frames_per_chunk, num_classes)-shaped np.ndarray - start_frame = frames.closest_frame(chunk.start) - if start_frame + num_frames_per_chunk > num_frames: - print("here") - else: - aggregated_output[start_frame : start_frame + num_frames_per_chunk] += ( - score * mask * hamming_window * warm_up_window - ) - - overlapping_chunk_count[ - start_frame : start_frame + num_frames_per_chunk - ] += (mask * hamming_window * warm_up_window) - - aggregated_mask[ - start_frame : start_frame + num_frames_per_chunk - ] = np.maximum( - aggregated_mask[start_frame : start_frame + num_frames_per_chunk], - mask, - ) - - if skip_average: - average = aggregated_output - else: - average = aggregated_output / np.maximum(overlapping_chunk_count, epsilon) - - average[aggregated_mask == 0.0] = missing - - return SlidingWindowFeature(average, frames) - - @staticmethod - def trim( - scores: SlidingWindowFeature, - warm_up: Tuple[float, float] = (0.1, 0.1), - ) -> SlidingWindowFeature: - """Trim left and right warm-up regions - - Parameters - ---------- - scores : SlidingWindowFeature - (num_chunks, num_frames, num_classes)-shaped scores. 
- warm_up : (float, float) tuple - Left/right warm up ratio of chunk duration. - Defaults to (0.1, 0.1), i.e. 10% on both sides. - - Returns - ------- - trimmed : SlidingWindowFeature - (num_chunks, trimmed_num_frames, num_speakers)-shaped scores - """ - - assert ( - scores.data.ndim == 3 - ), "Inference.trim expects (num_chunks, num_frames, num_classes)-shaped `scores`" - _, num_frames, _ = scores.data.shape - - chunks = scores.sliding_window - - num_frames_left = round(num_frames * warm_up[0]) - num_frames_right = round(num_frames * warm_up[1]) - - num_frames_step = round(num_frames * chunks.step / chunks.duration) - if num_frames - num_frames_left - num_frames_right < num_frames_step: - warnings.warn( - f"Total `warm_up` is so large ({sum(warm_up) * 100:g}% of each chunk) " - f"that resulting trimmed scores does not cover a whole step ({chunks.step:g}s)" - ) - new_data = scores.data[:, num_frames_left : num_frames - num_frames_right] - - new_chunks = SlidingWindow( - start=chunks.start + warm_up[0] * chunks.duration, - step=chunks.step, - duration=(1 - warm_up[0] - warm_up[1]) * chunks.duration, - ) - - return SlidingWindowFeature(new_data, new_chunks) - - @staticmethod - def stitch( - activations: SlidingWindowFeature, - frames: SlidingWindow = None, - lookahead: Optional[Tuple[int, int]] = None, - cost_func: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None, - match_func: Callable[[np.ndarray, np.ndarray, float], bool] = None, - ) -> SlidingWindowFeature: - """ - - Parameters - ---------- - activations : SlidingWindowFeature - (num_chunks, num_frames, num_classes)-shaped scores. - frames : SlidingWindow, optional - Frames resolution. Defaults to estimate it automatically based on `activations` - shape and chunk size. Providing the exact frame resolution (when known) leads to better - temporal precision. - lookahead : (int, int) tuple - Number of past and future adjacent chunks to use for stitching. - Defaults to (k, k) with k = chunk_duration / chunk_step - 1 - cost_func : callable - Cost function used to find the optimal mapping between two chunks. - Expects two (num_frames, num_classes) torch.tensor as input - and returns cost as a (num_classes, ) torch.tensor - Defaults to mean absolute error (utils.permutations.mae_cost_func) - match_func : callable - Function used to decide whether two speakers mapped by the optimal - mapping actually are a match. - Expects two (num_frames, ) np.ndarray and the cost (from cost_func) - and returns a boolean. Defaults to always returning True. 
- """ - - num_chunks, num_frames, num_classes = activations.data.shape - - chunks: SlidingWindow = activations.sliding_window - - if frames is None: - duration = step = chunks.duration / num_frames - frames = SlidingWindow(start=chunks.start, duration=duration, step=step) - else: - frames = SlidingWindow( - start=chunks.start, - duration=frames.duration, - step=frames.step, - ) - - max_lookahead = math.floor(chunks.duration / chunks.step - 1) - if lookahead is None: - lookahead = 2 * (max_lookahead,) - - assert all(L <= max_lookahead for L in lookahead) - - if cost_func is None: - cost_func = mae_cost_func - - if match_func is None: - - def always_match(this: np.ndarray, that: np.ndarray, cost: float): - return True - - match_func = always_match - - stitches = [] - for C, (chunk, activation) in enumerate(activations): - local_stitch = np.NAN * np.zeros( - (sum(lookahead) + 1, num_frames, num_classes) - ) - - for c in range( - max(0, C - lookahead[0]), min(num_chunks, C + lookahead[1] + 1) - ): - # extract common temporal support - shift = round((C - c) * num_frames * chunks.step / chunks.duration) - - if shift < 0: - shift = -shift - this_activations = activation[shift:] - that_activations = activations[c, : num_frames - shift] - else: - this_activations = activation[: num_frames - shift] - that_activations = activations[c, shift:] - - # find the optimal one-to-one mapping - _, (permutation,), (cost,) = permutate( - this_activations[np.newaxis], - that_activations, - cost_func=cost_func, - return_cost=True, - ) - - for this, that in enumerate(permutation): - # only stitch under certain condiditions - matching = (c == C) or ( - match_func( - this_activations[:, this], - that_activations[:, that], - cost[this, that], - ) - ) - - if matching: - local_stitch[c - C + lookahead[0], :, this] = activations[ - c, :, that - ] - - # TODO: do not lookahead further once a mismatch is found - - stitched_chunks = SlidingWindow( - start=chunk.start - lookahead[0] * chunks.step, - duration=chunks.duration, - step=chunks.step, - ) - - local_stitch = Inference.aggregate( - SlidingWindowFeature(local_stitch, stitched_chunks), - frames=frames, - hamming=True, - ) - - stitches.append(local_stitch.data) - - stitches = np.stack(stitches) - stitched_chunks = SlidingWindow( - start=chunks.start - lookahead[0] * chunks.step, - duration=chunks.duration + sum(lookahead) * chunks.step, - step=chunks.step, - ) - - return SlidingWindowFeature(stitches, stitched_chunks) - diff --git a/pyannote/audio/core/inference.py b/pyannote/audio/core/inference.py index c7c3ced2d..ee5d8040f 100644 --- a/pyannote/audio/core/inference.py +++ b/pyannote/audio/core/inference.py @@ -94,7 +94,6 @@ def __init__( device: torch.device = None, batch_size: int = 32, use_auth_token: Union[Text, None] = None, - latency_index: int = None, ): # ~~~~ model ~~~~~ @@ -140,7 +139,6 @@ def __init__( f"{duration:g}s chunks for inference: this might lead to suboptimal results." 
) self.duration = duration - self.latency_index = latency_index # ~~~~ powerset to multilabel conversion ~~~~ @@ -229,9 +227,6 @@ def infer(self, chunks: torch.Tensor) -> Union[np.ndarray, Tuple[np.ndarray]]: def __convert(output: torch.Tensor, conversion: nn.Module, **kwargs): return conversion(output).cpu().numpy() - if self.latency_index is not None: - return map_with_specifications( - self.model.specifications, __convert, outputs[self.latency_index], self.conversion) return map_with_specifications( self.model.specifications, __convert, outputs, self.conversion @@ -561,7 +556,7 @@ def aggregate( aggregated_scores : SlidingWindowFeature Aggregated scores. Shape is (num_frames, num_classes) """ - + print("aggregate") num_chunks, num_frames_per_chunk, num_classes = scores.data.shape chunks = scores.sliding_window @@ -659,85 +654,6 @@ def aggregate( return SlidingWindowFeature(average, frames) - @staticmethod - def aggregate_end_chunk( - scores: SlidingWindowFeature, - frames: SlidingWindow = None, - warm_up: Tuple[float, float] = (0.0, 0.0), - epsilon: float = 1e-12, - hamming: bool = False, - missing: float = np.NaN, - skip_average: bool = False, - ) -> SlidingWindowFeature: - """Aggregation - - Parameters - ---------- - scores : SlidingWindowFeature - Raw (unaggregated) scores. Shape is (num_chunks, num_frames_per_chunk, num_classes). - frames : SlidingWindow, optional - Frames resolution. Defaults to estimate it automatically based on `scores` shape - and chunk size. Providing the exact frame resolution (when known) leads to better - temporal precision. - warm_up : (float, float) tuple, optional - Left/right warm up duration (in seconds). - missing : float, optional - Value used to replace missing (ie all NaNs) values. - skip_average : bool, optional - Skip final averaging step. - - Returns - ------- - aggregated_scores : SlidingWindowFeature - Aggregated scores. 
Shape is (num_frames, num_classes) - """ - - num_chunks, num_frames_per_chunk, num_classes = scores.data.shape - - chunks = scores.sliding_window - if frames is None: - duration = step = chunks.duration / num_frames_per_chunk - frames = SlidingWindow(start=chunks.start, duration=duration, step=step) - else: - frames = SlidingWindow( - start=chunks.start, - duration=frames.duration, - step=frames.step, - ) - masks = 1 - np.isnan(scores) - scores.data = np.nan_to_num(scores.data, copy=True, nan=0.0) - - # aggregated_output[i] will be used to store the sum of all predictions - # for frame #i - num_frames = ( - frames.closest_frame( - scores.sliding_window.start - + scores.sliding_window.duration - + (num_chunks - 1) * scores.sliding_window.step - ) - + 1 - ) - step_frames = frames.closest_frame(scores.sliding_window.step) - aggregated_output: np.ndarray = np.zeros( - (num_frames, num_classes), dtype=np.float32 - ) - aggregated_output[0 : num_frames_per_chunk-step_frames] = scores[0][:num_frames_per_chunk-step_frames] - end = scores.sliding_window.duration - scores.sliding_window.step - - # data = scores.data - # print(data.shape) - # data=data[1:] - # scores = scores[1:] - # loop on the scores of sliding chunks - for (chunk, score) in scores: - # chunk ~ Segment - # score ~ (num_frames_per_chunk, num_classes)-shaped np.ndarray - # mask ~ (num_frames_per_chunk, num_classes)-shaped np.ndarray - start_frame = frames.closest_frame(end) - aggregated_output[start_frame : start_frame + step_frames] = score[num_frames_per_chunk-step_frames:] - end = chunk.end - - return SlidingWindowFeature(aggregated_output, frames) @staticmethod def trim( diff --git a/pyannote/audio/core/model.py b/pyannote/audio/core/model.py index 589d10a5c..bedb7f6c4 100644 --- a/pyannote/audio/core/model.py +++ b/pyannote/audio/core/model.py @@ -199,7 +199,7 @@ def __example_output( specifications: Specifications = None, ) -> Output: if specifications.resolution == Resolution.FRAME: - num_frames, dimension = example_output.shape[-2], example_output.shape[-1] + _, num_frames, dimension = example_output.shape frame_duration = specifications.duration / num_frames frames = SlidingWindow(step=frame_duration, duration=frame_duration) else: diff --git a/pyannote/audio/models/segmentation/MultilatencyPyanNet.py b/pyannote/audio/models/segmentation/MultilatencyPyanNet.py index 316216a47..cf716d5d1 100644 --- a/pyannote/audio/models/segmentation/MultilatencyPyanNet.py +++ b/pyannote/audio/models/segmentation/MultilatencyPyanNet.py @@ -221,110 +221,4 @@ def forward(self, waveforms: torch.Tensor) -> torch.Tensor: return predictions # return only the corresponding latency - return predictions[:,:,self.latency_index*num_classes_powerset:self.latency_index*num_classes_powerset+num_classes_powerset] - - - - # def __example_input_array(self, duration: Optional[float] = None) -> torch.Tensor: - # duration = duration or next(iter(self.specifications)).duration - # return torch.randn( - # ( - # 1, - # self.hparams.num_channels, - # self.audio.get_num_samples(duration), - # ), - # device=self.device, - # ) - -# @property -# def example_input_array(self) -> torch.Tensor: -# return self.__example_input_array() - - -# @cached_property -# def example_output(self) -> Union[Output, Tuple[Output]]: -# """Example output""" -# example_input_array = self.__example_input_array() -# with torch.inference_mode(): -# example_output = self(example_input_array) - -# def __example_output( -# example_output: torch.Tensor, -# specifications: Specifications = None, -# 
) -> Output: -# if specifications.resolution == Resolution.FRAME: -# num_frames, dimension = example_output.shape[-2], example_output.shape[-1] -# frame_duration = specifications.duration / num_frames -# frames = SlidingWindow(step=frame_duration, duration=frame_duration) -# else: -# _, dimension = example_output.shape -# num_frames = None -# frames = None - -# return Output( -# num_frames=num_frames, -# dimension=dimension, -# frames=frames, -# ) - -# return map_with_specifications( -# self.specifications, __example_output, example_output -# ) - -# def setup(self, stage=None): -# if stage == "fit": -# self.task.setup_metadata() - -# # list of layers before adding task-dependent layers -# before = set((name, id(module)) for name, module in self.named_modules()) - -# # add task-dependent layers (e.g. final classification layer) -# # and re-use original weights when compatible - -# original_state_dict = self.state_dict() -# self.build() - -# try: -# missing_keys, unexpected_keys = self.load_state_dict( -# original_state_dict, strict=False -# ) - -# except RuntimeError as e: -# if "size mismatch" in str(e): -# msg = ( -# "Model has been trained for a different task. For fine tuning or transfer learning, " -# "it is recommended to train task-dependent layers for a few epochs " -# f"before training the whole model: {self.task_dependent}." -# ) -# warnings.warn(msg) -# else: -# raise e - -# # move layers that were added by build() to same device as the rest of the model -# for name, module in self.named_modules(): -# if (name, id(module)) not in before: -# (trainable) loss function (e.g. ArcFace has its own set of trainable weights) -# if stage == "fit": -# … # list of layers after adding task-dependent layers -# after = set((name, id(module)) for name, module in self.named_modules()) - -# # list of task-dependent layers -# self.task_dependent = list(name for name, _ in after - before) module.to(self.device) - -# # add (trainable) loss function (e.g. 
ArcFace has its own set of trainable weights) -# if stage == "fit": -# # let task know about the model -# self.task.model = self -# # setup custom loss function -# self.task.setup_loss_func() -# # setup custom validation metrics -# self.task.setup_validation_metric() - -# # cache for later (and to avoid later CUDA error with multiprocessing) -# _ = self.example_output - -# # list of layers after adding task-dependent layers -# after = set((name, id(module)) for name, module in self.named_modules()) - -# # list of task-dependent layers -# self.task_dependent = list(name for name, _ in after - before) \ No newline at end of file + return predictions[:,:,self.latency_index*num_classes_powerset:self.latency_index*num_classes_powerset+num_classes_powerset] \ No newline at end of file From 71043fc00d2301e9bfdf52004ad2d6a3f038083d Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Mon, 25 Mar 2024 16:59:56 +0100 Subject: [PATCH 18/23] add StreamingInference --- pyannote/audio/core/streaming_inference.py | 911 +++++++++++++++++++++ 1 file changed, 911 insertions(+) create mode 100644 pyannote/audio/core/streaming_inference.py diff --git a/pyannote/audio/core/streaming_inference.py b/pyannote/audio/core/streaming_inference.py new file mode 100644 index 000000000..9063a8c58 --- /dev/null +++ b/pyannote/audio/core/streaming_inference.py @@ -0,0 +1,911 @@ +# MIT License +# +# Copyright (c) 2020- CNRS +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import math +import warnings +from pathlib import Path +from typing import Callable, List, Optional, Text, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange +from pyannote.core import Segment, SlidingWindow, SlidingWindowFeature +from pytorch_lightning.utilities.memory import is_oom_error + +from pyannote.audio.core.io import AudioFile +from pyannote.audio.core.model import Model, Specifications +from pyannote.audio.core.task import Resolution +from pyannote.audio.utils.multi_task import map_with_specifications +from pyannote.audio.utils.permutation import mae_cost_func, permutate +from pyannote.audio.utils.powerset import Powerset +from pyannote.audio.utils.reproducibility import fix_reproducibility + + +class BaseInference: + pass + + +class StreamingInference(BaseInference): + """Inference + + Parameters + ---------- + model : Model + Model. Will be automatically set to eval() mode and moved to `device` when provided. 
+ window : {"sliding", "whole"}, optional + Use a "sliding" window and aggregate the corresponding outputs (default) + or just one (potentially long) window covering the "whole" file or chunk. + duration : float, optional + Chunk duration, in seconds. Defaults to duration used for training the model. + Has no effect when `window` is "whole". + step : float, optional + Step between consecutive chunks, in seconds. Defaults to warm-up duration when + greater than 0s, otherwise 10% of duration. Has no effect when `window` is "whole". + pre_aggregation_hook : callable, optional + When a callable is provided, it is applied to the model output, just before aggregation. + Takes a (num_chunks, num_frames, dimension) numpy array as input and returns a modified + (num_chunks, num_frames, other_dimension) numpy array passed to overlap-add aggregation. + skip_aggregation : bool, optional + Do not aggregate outputs when using "sliding" window. Defaults to False. + skip_conversion: bool, optional + In case a task has been trained with `powerset` mode, output is automatically + converted to `multi-label`, unless `skip_conversion` is set to True. + batch_size : int, optional + Batch size. Larger values (should) make inference faster. Defaults to 32. + device : torch.device, optional + Device used for inference. Defaults to `model.device`. + In case `device` and `model.device` are different, model is sent to device. + use_auth_token : str, optional + When loading a private huggingface.co model, set `use_auth_token` + to True or to a string containing your hugginface.co authentication + token that can be obtained by running `huggingface-cli login` + """ + + def __init__( + self, + model: Union[Model, Text, Path], + window: Text = "sliding", + duration: float = None, + step: float = None, + pre_aggregation_hook: Callable[[np.ndarray], np.ndarray] = None, + skip_aggregation: bool = False, + skip_conversion: bool = False, + device: torch.device = None, + batch_size: int = 32, + use_auth_token: Union[Text, None] = None, + ): + # ~~~~ model ~~~~~ + + self.model = ( + model + if isinstance(model, Model) + else Model.from_pretrained( + model, + map_location=device, + strict=False, + use_auth_token=use_auth_token, + ) + ) + + if device is None: + device = self.model.device + self.device = device + + self.model.eval() + self.model.to(self.device) + + specifications = self.model.specifications + + # ~~~~ sliding window ~~~~~ + + if window not in ["sliding", "whole"]: + raise ValueError('`window` must be "sliding" or "whole".') + + if window == "whole" and any( + s.resolution == Resolution.FRAME for s in specifications + ): + warnings.warn( + 'Using "whole" `window` inference with a frame-based model might lead to bad results ' + 'and huge memory consumption: it is recommended to set `window` to "sliding".' + ) + self.window = window + + training_duration = next(iter(specifications)).duration + duration = duration or training_duration + if training_duration != duration: + warnings.warn( + f"Model was trained with {training_duration:g}s chunks, and you requested " + f"{duration:g}s chunks for inference: this might lead to suboptimal results." 
+ ) + self.duration = duration + + # ~~~~ powerset to multilabel conversion ~~~~ + + self.skip_conversion = skip_conversion + + conversion = list() + for s in specifications: + if s.powerset and not skip_conversion: + c = Powerset(len(s.classes), s.powerset_max_classes) + else: + c = nn.Identity() + conversion.append(c.to(self.device)) + + if isinstance(specifications, Specifications): + self.conversion = conversion[0] + else: + self.conversion = nn.ModuleList(conversion) + + # ~~~~ overlap-add aggregation ~~~~~ + + self.skip_aggregation = skip_aggregation + self.pre_aggregation_hook = pre_aggregation_hook + + self.warm_up = next(iter(specifications)).warm_up + # Use that many seconds on the left- and rightmost parts of each chunk + # to warm up the model. While the model does process those left- and right-most + # parts, only the remaining central part of each chunk is used for aggregating + # scores during inference. + + # step between consecutive chunks + step = step or ( + 0.1 * self.duration if self.warm_up[0] == 0.0 else self.warm_up[0] + ) + + if step > self.duration: + raise ValueError( + f"Step between consecutive chunks is set to {step:g}s, while chunks are " + f"only {self.duration:g}s long, leading to gaps between consecutive chunks. " + f"Either decrease step or increase duration." + ) + self.step = step + + self.batch_size = batch_size + + def to(self, device: torch.device) -> "Inference": + """Send internal model to `device`""" + + if not isinstance(device, torch.device): + raise TypeError( + f"`device` must be an instance of `torch.device`, got `{type(device).__name__}`" + ) + + self.model.to(device) + self.conversion.to(device) + self.device = device + return self + + def infer(self, chunks: torch.Tensor) -> Union[np.ndarray, Tuple[np.ndarray]]: + """Forward pass + + Takes care of sending chunks to right device and outputs back to CPU + + Parameters + ---------- + chunks : (batch_size, num_channels, num_samples) torch.Tensor + Batch of audio chunks. + + Returns + ------- + outputs : (tuple of) (batch_size, ...) np.ndarray + Model output. + """ + + with torch.inference_mode(): + try: + outputs = self.model(chunks.to(self.device)) + except RuntimeError as exception: + if is_oom_error(exception): + raise MemoryError( + f"batch_size ({self.batch_size: d}) is probably too large. " + f"Try with a smaller value until memory error disappears." + ) + else: + raise exception + + def __convert(output: torch.Tensor, conversion: nn.Module, **kwargs): + return conversion(output).cpu().numpy() + + return map_with_specifications( + self.model.specifications, __convert, outputs, self.conversion + ) + + def slide( + self, + waveform: torch.Tensor, + sample_rate: int, + hook: Optional[Callable], + ) -> Union[SlidingWindowFeature, Tuple[SlidingWindowFeature]]: + """Slide model on a waveform + + Parameters + ---------- + waveform: (num_channels, num_samples) torch.Tensor + Waveform. + sample_rate : int + Sample rate. + hook: Optional[Callable] + When a callable is provided, it is called everytime a batch is + processed with two keyword arguments: + - `completed`: the number of chunks that have been processed so far + - `total`: the total number of chunks + + Returns + ------- + output : (tuple of) SlidingWindowFeature + Model output. Shape is (num_chunks, dimension) for chunk-level tasks, + and (num_frames, dimension) for frame-level tasks. 
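+
+        Examples
+        --------
+        Illustrative sketch only ("audio.wav" is a placeholder path): load a waveform
+        with the model's own audio reader and slide over it, which is what `__call__`
+        does internally when `window` is set to "sliding":
+        >>> waveform, sample_rate = inference.model.audio("audio.wav")
+        >>> output = inference.slide(waveform, sample_rate, hook=None)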
+ """ + + window_size: int = self.model.audio.get_num_samples(self.duration) + step_size: int = round(self.step * sample_rate) + _, num_samples = waveform.shape + + def __frames( + example_output, specifications: Optional[Specifications] = None + ) -> SlidingWindow: + if specifications.resolution == Resolution.CHUNK: + return SlidingWindow(start=0.0, duration=self.duration, step=self.step) + return example_output.frames + + frames: Union[SlidingWindow, Tuple[SlidingWindow]] = map_with_specifications( + self.model.specifications, + __frames, + self.model.example_output, + ) + + # prepare complete chunks + if num_samples >= window_size: + chunks: torch.Tensor = rearrange( + waveform.unfold(1, window_size, step_size), + "channel chunk frame -> chunk channel frame", + ) + num_chunks, _, _ = chunks.shape + else: + num_chunks = 0 + + # prepare last incomplete chunk + has_last_chunk = (num_samples < window_size) or ( + num_samples - window_size + ) % step_size > 0 + if has_last_chunk: + # pad last chunk with zeros + last_chunk: torch.Tensor = waveform[:, num_chunks * step_size :] + _, last_window_size = last_chunk.shape + last_pad = window_size - last_window_size + last_chunk = F.pad(last_chunk, (0, last_pad)) + + def __empty_list(**kwargs): + return list() + + outputs: Union[ + List[np.ndarray], Tuple[List[np.ndarray]] + ] = map_with_specifications(self.model.specifications, __empty_list) + + if hook is not None: + hook(completed=0, total=num_chunks + has_last_chunk) + + def __append_batch(output, batch_output, **kwargs) -> None: + output.append(batch_output) + return + + # slide over audio chunks in batch + for c in np.arange(0, num_chunks, self.batch_size): + batch: torch.Tensor = chunks[c : c + self.batch_size] + + batch_outputs: Union[np.ndarray, Tuple[np.ndarray]] = self.infer(batch) + + _ = map_with_specifications( + self.model.specifications, __append_batch, outputs, batch_outputs + ) + + if hook is not None: + hook(completed=c + self.batch_size, total=num_chunks + has_last_chunk) + + # process orphan last chunk + if has_last_chunk: + last_outputs = self.infer(last_chunk[None]) + + _ = map_with_specifications( + self.model.specifications, __append_batch, outputs, last_outputs + ) + + if hook is not None: + hook( + completed=num_chunks + has_last_chunk, + total=num_chunks + has_last_chunk, + ) + + def __vstack(output: List[np.ndarray], **kwargs) -> np.ndarray: + return np.vstack(output) + + outputs: Union[np.ndarray, Tuple[np.ndarray]] = map_with_specifications( + self.model.specifications, __vstack, outputs + ) + + def __aggregate( + outputs: np.ndarray, + frames: SlidingWindow, + specifications: Optional[Specifications] = None, + ) -> SlidingWindowFeature: + # skip aggregation when requested, + # or when model outputs just one vector per chunk + # or when model is permutation-invariant (and not post-processed) + if ( + self.skip_aggregation + or specifications.resolution == Resolution.CHUNK + or ( + specifications.permutation_invariant + and self.pre_aggregation_hook is None + ) + ): + frames = SlidingWindow( + start=0.0, duration=self.duration, step=self.step + ) + return SlidingWindowFeature(outputs, frames) + + if self.pre_aggregation_hook is not None: + outputs = self.pre_aggregation_hook(outputs) + + aggregated = self.concatenate_end_chunk( + SlidingWindowFeature( + outputs, + SlidingWindow(start=0.0, duration=self.duration, step=self.step), + ), + frames=frames, + warm_up=self.warm_up, + hamming=True, + missing=0.0, + ) + + # remove padding that was added to last chunk + if 
has_last_chunk: + aggregated.data = aggregated.crop( + Segment(0.0, num_samples / sample_rate), mode="loose" + ) + + return aggregated + + return map_with_specifications( + self.model.specifications, __aggregate, outputs, frames + ) + + def __call__( + self, file: AudioFile, hook: Optional[Callable] = None + ) -> Union[ + Tuple[Union[SlidingWindowFeature, np.ndarray]], + Union[SlidingWindowFeature, np.ndarray], + ]: + """Run inference on a whole file + + Parameters + ---------- + file : AudioFile + Audio file. + hook : callable, optional + When a callable is provided, it is called everytime a batch is processed + with two keyword arguments: + - `completed`: the number of chunks that have been processed so far + - `total`: the total number of chunks + + Returns + ------- + output : (tuple of) SlidingWindowFeature or np.ndarray + Model output, as `SlidingWindowFeature` if `window` is set to "sliding" + and `np.ndarray` if is set to "whole". + + """ + + fix_reproducibility(self.device) + + waveform, sample_rate = self.model.audio(file) + + if self.window == "sliding": + return self.slide(waveform, sample_rate, hook=hook) + + outputs: Union[np.ndarray, Tuple[np.ndarray]] = self.infer(waveform[None]) + + def __first_sample(outputs: np.ndarray, **kwargs) -> np.ndarray: + return outputs[0] + + return map_with_specifications( + self.model.specifications, __first_sample, outputs + ) + + def crop( + self, + file: AudioFile, + chunk: Union[Segment, List[Segment]], + duration: Optional[float] = None, + hook: Optional[Callable] = None, + ) -> Union[ + Tuple[Union[SlidingWindowFeature, np.ndarray]], + Union[SlidingWindowFeature, np.ndarray], + ]: + """Run inference on a chunk or a list of chunks + + Parameters + ---------- + file : AudioFile + Audio file. + chunk : Segment or list of Segment + Apply model on this chunk. When a list of chunks is provided and + window is set to "sliding", this is equivalent to calling crop on + the smallest chunk that contains all chunks. In case window is set + to "whole", this is equivalent to concatenating each chunk into one + (artifical) chunk before processing it. + duration : float, optional + Enforce chunk duration (in seconds). This is a hack to avoid rounding + errors that may result in a different number of audio samples for two + chunks of the same duration. + hook : callable, optional + When a callable is provided, it is called everytime a batch is processed + with two keyword arguments: + - `completed`: the number of chunks that have been processed so far + - `total`: the total number of chunks + + Returns + ------- + output : (tuple of) SlidingWindowFeature or np.ndarray + Model output, as `SlidingWindowFeature` if `window` is set to "sliding" + and `np.ndarray` if is set to "whole". 
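+
+        Examples
+        --------
+        Illustrative sketch only ("audio.wav" is a placeholder path): run inference on
+        a 15-second excerpt using the default "sliding" window:
+        >>> from pyannote.core import Segment
+        >>> excerpt_output = inference.crop("audio.wav", Segment(10.0, 25.0))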
+ + Notes + ----- + If model needs to be warmed up, remember to extend the requested chunk with the + corresponding amount of time so that it is actually warmed up when processing the + chunk of interest: + >>> chunk_of_interest = Segment(10, 15) + >>> extended_chunk = Segment(10 - warm_up, 15 + warm_up) + >>> inference.crop(file, extended_chunk).crop(chunk_of_interest, returns_data=False) + """ + + fix_reproducibility(self.device) + + if self.window == "sliding": + if not isinstance(chunk, Segment): + start = min(c.start for c in chunk) + end = max(c.end for c in chunk) + chunk = Segment(start=start, end=end) + + waveform, sample_rate = self.model.audio.crop( + file, chunk, duration=duration + ) + outputs: Union[ + SlidingWindowFeature, Tuple[SlidingWindowFeature] + ] = self.slide(waveform, sample_rate, hook=hook) + + def __shift(output: SlidingWindowFeature, **kwargs) -> SlidingWindowFeature: + frames = output.sliding_window + shifted_frames = SlidingWindow( + start=chunk.start, duration=frames.duration, step=frames.step + ) + return SlidingWindowFeature(output.data, shifted_frames) + + return map_with_specifications(self.model.specifications, __shift, outputs) + + if isinstance(chunk, Segment): + waveform, sample_rate = self.model.audio.crop( + file, chunk, duration=duration + ) + else: + waveform = torch.cat( + [self.model.audio.crop(file, c)[0] for c in chunk], dim=1 + ) + + outputs: Union[np.ndarray, Tuple[np.ndarray]] = self.infer(waveform[None]) + + def __first_sample(outputs: np.ndarray, **kwargs) -> np.ndarray: + return outputs[0] + + return map_with_specifications( + self.model.specifications, __first_sample, outputs + ) + + @staticmethod + def aggregate( + scores: SlidingWindowFeature, + frames: SlidingWindow = None, + warm_up: Tuple[float, float] = (0.0, 0.0), + epsilon: float = 1e-12, + hamming: bool = False, + missing: float = np.NaN, + skip_average: bool = False, + ) -> SlidingWindowFeature: + """Aggregation + + Parameters + ---------- + scores : SlidingWindowFeature + Raw (unaggregated) scores. Shape is (num_chunks, num_frames_per_chunk, num_classes). + frames : SlidingWindow, optional + Frames resolution. Defaults to estimate it automatically based on `scores` shape + and chunk size. Providing the exact frame resolution (when known) leads to better + temporal precision. + warm_up : (float, float) tuple, optional + Left/right warm up duration (in seconds). + missing : float, optional + Value used to replace missing (ie all NaNs) values. + skip_average : bool, optional + Skip final averaging step. + + Returns + ------- + aggregated_scores : SlidingWindowFeature + Aggregated scores. 
Shape is (num_frames, num_classes) + """ + + num_chunks, num_frames_per_chunk, num_classes = scores.data.shape + + chunks = scores.sliding_window + if frames is None: + duration = step = chunks.duration / num_frames_per_chunk + frames = SlidingWindow(start=chunks.start, duration=duration, step=step) + else: + frames = SlidingWindow( + start=chunks.start, + duration=frames.duration, + step=frames.step, + ) + + masks = 1 - np.isnan(scores) + scores.data = np.nan_to_num(scores.data, copy=True, nan=0.0) + + # Hamming window used for overlap-add aggregation + hamming_window = ( + np.hamming(num_frames_per_chunk).reshape(-1, 1) + if hamming + else np.ones((num_frames_per_chunk, 1)) + ) + + # anything before warm_up_left (and after num_frames_per_chunk - warm_up_right) + # will not be used in the final aggregation + + # warm-up windows used for overlap-add aggregation + warm_up_window = np.ones((num_frames_per_chunk, 1)) + # anything before warm_up_left will not contribute to aggregation + warm_up_left = round( + warm_up[0] / scores.sliding_window.duration * num_frames_per_chunk + ) + warm_up_window[:warm_up_left] = epsilon + # anything after num_frames_per_chunk - warm_up_right either + warm_up_right = round( + warm_up[1] / scores.sliding_window.duration * num_frames_per_chunk + ) + warm_up_window[num_frames_per_chunk - warm_up_right :] = epsilon + + # aggregated_output[i] will be used to store the sum of all predictions + # for frame #i + num_frames = ( + frames.closest_frame( + scores.sliding_window.start + + scores.sliding_window.duration + + (num_chunks - 1) * scores.sliding_window.step + ) + + 1 + ) + + aggregated_output: np.ndarray = np.zeros( + (num_frames, num_classes), dtype=np.float32 + ) + + # overlapping_chunk_count[i] will be used to store the number of chunks + # that contributed to frame #i + overlapping_chunk_count: np.ndarray = np.zeros( + (num_frames, num_classes), dtype=np.float32 + ) + + # aggregated_mask[i] will be used to indicate whether + # at least one non-NAN frame contributed to frame #i + aggregated_mask: np.ndarray = np.zeros( + (num_frames, num_classes), dtype=np.float32 + ) + # loop on the scores of sliding chunks + for (chunk, score), (_, mask) in zip(scores, masks): + # chunk ~ Segment + # score ~ (num_frames_per_chunk, num_classes)-shaped np.ndarray + # mask ~ (num_frames_per_chunk, num_classes)-shaped np.ndarray + start_frame = frames.closest_frame(chunk.start) + + + aggregated_output[start_frame : start_frame + num_frames_per_chunk] += ( + score * mask * hamming_window * warm_up_window + ) + + overlapping_chunk_count[ + start_frame : start_frame + num_frames_per_chunk + ] += (mask * hamming_window * warm_up_window) + + aggregated_mask[ + start_frame : start_frame + num_frames_per_chunk + ] = np.maximum( + aggregated_mask[start_frame : start_frame + num_frames_per_chunk], + mask, + ) + + if skip_average: + average = aggregated_output + else: + average = aggregated_output / np.maximum(overlapping_chunk_count, epsilon) + + average[aggregated_mask == 0.0] = missing + + return SlidingWindowFeature(average, frames) + + @staticmethod + def concatenate_end_chunk( + scores: SlidingWindowFeature, + frames: SlidingWindow = None, + warm_up: Tuple[float, float] = (0.0, 0.0), + epsilon: float = 1e-12, + hamming: bool = False, + missing: float = np.NaN, + skip_average: bool = False, + ) -> SlidingWindowFeature: + """Aggregation + + Parameters + ---------- + scores : SlidingWindowFeature + Raw (unaggregated) scores. Shape is (num_chunks, num_frames_per_chunk, num_classes). 
+ frames : SlidingWindow, optional + Frames resolution. Defaults to estimate it automatically based on `scores` shape + and chunk size. Providing the exact frame resolution (when known) leads to better + temporal precision. + warm_up : (float, float) tuple, optional + Left/right warm up duration (in seconds). + missing : float, optional + Value used to replace missing (ie all NaNs) values. + skip_average : bool, optional + Skip final averaging step. + + Returns + ------- + aggregated_scores : SlidingWindowFeature + Aggregated scores. Shape is (num_frames, num_classes) + """ + print("concatenate") + num_chunks, num_frames_per_chunk, num_classes = scores.data.shape + + chunks = scores.sliding_window + if frames is None: + duration = step = chunks.duration / num_frames_per_chunk + frames = SlidingWindow(start=chunks.start, duration=duration, step=step) + else: + frames = SlidingWindow( + start=chunks.start, + duration=frames.duration, + step=frames.step, + ) + masks = 1 - np.isnan(scores) + scores.data = np.nan_to_num(scores.data, copy=True, nan=0.0) + + # aggregated_output[i] will be used to store the sum of all predictions + # for frame #i + num_frames = ( + frames.closest_frame( + scores.sliding_window.start + + scores.sliding_window.duration + + (num_chunks - 1) * scores.sliding_window.step + ) + + 1 + ) + step_frames = frames.closest_frame(scores.sliding_window.step) + aggregated_output: np.ndarray = np.zeros( + (num_frames, num_classes), dtype=np.float32 + ) + aggregated_output[0 : num_frames_per_chunk-step_frames] = scores[0][:num_frames_per_chunk-step_frames] + end = scores.sliding_window.duration - scores.sliding_window.step + + # data = scores.data + # print(data.shape) + # data=data[1:] + # scores = scores[1:] + # loop on the scores of sliding chunks + for (chunk, score) in scores: + # chunk ~ Segment + # score ~ (num_frames_per_chunk, num_classes)-shaped np.ndarray + start_frame = frames.closest_frame(end) + aggregated_output[start_frame : start_frame + step_frames] = score[num_frames_per_chunk-step_frames:] + end = chunk.end + + return SlidingWindowFeature(aggregated_output, frames) + + @staticmethod + def trim( + scores: SlidingWindowFeature, + warm_up: Tuple[float, float] = (0.1, 0.1), + ) -> SlidingWindowFeature: + """Trim left and right warm-up regions + + Parameters + ---------- + scores : SlidingWindowFeature + (num_chunks, num_frames, num_classes)-shaped scores. + warm_up : (float, float) tuple + Left/right warm up ratio of chunk duration. + Defaults to (0.1, 0.1), i.e. 10% on both sides. 
+ + Returns + ------- + trimmed : SlidingWindowFeature + (num_chunks, trimmed_num_frames, num_speakers)-shaped scores + """ + + assert ( + scores.data.ndim == 3 + ), "Inference.trim expects (num_chunks, num_frames, num_classes)-shaped `scores`" + _, num_frames, _ = scores.data.shape + + chunks = scores.sliding_window + + num_frames_left = round(num_frames * warm_up[0]) + num_frames_right = round(num_frames * warm_up[1]) + + num_frames_step = round(num_frames * chunks.step / chunks.duration) + if num_frames - num_frames_left - num_frames_right < num_frames_step: + warnings.warn( + f"Total `warm_up` is so large ({sum(warm_up) * 100:g}% of each chunk) " + f"that resulting trimmed scores does not cover a whole step ({chunks.step:g}s)" + ) + new_data = scores.data[:, num_frames_left : num_frames - num_frames_right] + + new_chunks = SlidingWindow( + start=chunks.start + warm_up[0] * chunks.duration, + step=chunks.step, + duration=(1 - warm_up[0] - warm_up[1]) * chunks.duration, + ) + + return SlidingWindowFeature(new_data, new_chunks) + + @staticmethod + def stitch( + activations: SlidingWindowFeature, + frames: SlidingWindow = None, + lookahead: Optional[Tuple[int, int]] = None, + cost_func: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None, + match_func: Callable[[np.ndarray, np.ndarray, float], bool] = None, + ) -> SlidingWindowFeature: + """ + + Parameters + ---------- + activations : SlidingWindowFeature + (num_chunks, num_frames, num_classes)-shaped scores. + frames : SlidingWindow, optional + Frames resolution. Defaults to estimate it automatically based on `activations` + shape and chunk size. Providing the exact frame resolution (when known) leads to better + temporal precision. + lookahead : (int, int) tuple + Number of past and future adjacent chunks to use for stitching. + Defaults to (k, k) with k = chunk_duration / chunk_step - 1 + cost_func : callable + Cost function used to find the optimal mapping between two chunks. + Expects two (num_frames, num_classes) torch.tensor as input + and returns cost as a (num_classes, ) torch.tensor + Defaults to mean absolute error (utils.permutations.mae_cost_func) + match_func : callable + Function used to decide whether two speakers mapped by the optimal + mapping actually are a match. + Expects two (num_frames, ) np.ndarray and the cost (from cost_func) + and returns a boolean. Defaults to always returning True. 
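+
+        Examples
+        --------
+        Illustrative sketch only (the 0.5 threshold is an arbitrary choice): only stitch
+        two local speakers together when the permutation cost is low enough:
+        >>> def cautious_match(this, that, cost):
+        ...     return cost < 0.5
+        >>> stitched = StreamingInference.stitch(activations, match_func=cautious_match)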
+ """ + + num_chunks, num_frames, num_classes = activations.data.shape + + chunks: SlidingWindow = activations.sliding_window + + if frames is None: + duration = step = chunks.duration / num_frames + frames = SlidingWindow(start=chunks.start, duration=duration, step=step) + else: + frames = SlidingWindow( + start=chunks.start, + duration=frames.duration, + step=frames.step, + ) + + max_lookahead = math.floor(chunks.duration / chunks.step - 1) + if lookahead is None: + lookahead = 2 * (max_lookahead,) + + assert all(L <= max_lookahead for L in lookahead) + + if cost_func is None: + cost_func = mae_cost_func + + if match_func is None: + + def always_match(this: np.ndarray, that: np.ndarray, cost: float): + return True + + match_func = always_match + + stitches = [] + for C, (chunk, activation) in enumerate(activations): + local_stitch = np.NAN * np.zeros( + (sum(lookahead) + 1, num_frames, num_classes) + ) + + for c in range( + max(0, C - lookahead[0]), min(num_chunks, C + lookahead[1] + 1) + ): + # extract common temporal support + shift = round((C - c) * num_frames * chunks.step / chunks.duration) + + if shift < 0: + shift = -shift + this_activations = activation[shift:] + that_activations = activations[c, : num_frames - shift] + else: + this_activations = activation[: num_frames - shift] + that_activations = activations[c, shift:] + + # find the optimal one-to-one mapping + _, (permutation,), (cost,) = permutate( + this_activations[np.newaxis], + that_activations, + cost_func=cost_func, + return_cost=True, + ) + + for this, that in enumerate(permutation): + # only stitch under certain condiditions + matching = (c == C) or ( + match_func( + this_activations[:, this], + that_activations[:, that], + cost[this, that], + ) + ) + + if matching: + local_stitch[c - C + lookahead[0], :, this] = activations[ + c, :, that + ] + + # TODO: do not lookahead further once a mismatch is found + + stitched_chunks = SlidingWindow( + start=chunk.start - lookahead[0] * chunks.step, + duration=chunks.duration, + step=chunks.step, + ) + + local_stitch = Inference.aggregate( + SlidingWindowFeature(local_stitch, stitched_chunks), + frames=frames, + hamming=True, + ) + + stitches.append(local_stitch.data) + + stitches = np.stack(stitches) + stitched_chunks = SlidingWindow( + start=chunks.start - lookahead[0] * chunks.step, + duration=chunks.duration + sum(lookahead) * chunks.step, + step=chunks.step, + ) + + return SlidingWindowFeature(stitches, stitched_chunks) From d038e62dc6271802827b752439bf9a0d7b147c31 Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Mon, 25 Mar 2024 17:15:14 +0100 Subject: [PATCH 19/23] remove unnecessary files (guided model and streaming pipelines) --- .../models/segmentation/GuidedPyanNet.py | 234 ----- .../audio/models/segmentation/__init__.py | 3 +- pyannote/audio/pipelines/__init__.py | 2 - .../streaming_speaker_diarization.py | 621 ----------- pyannote/audio/tasks/__init__.py | 2 - .../segmentation/GuidedSpeakerDiarization | 992 ------------------ .../streaming_speaker_diarization.py | 898 ---------------- 7 files changed, 1 insertion(+), 2751 deletions(-) delete mode 100644 pyannote/audio/models/segmentation/GuidedPyanNet.py delete mode 100644 pyannote/audio/pipelines/streaming_speaker_diarization.py delete mode 100644 pyannote/audio/tasks/segmentation/GuidedSpeakerDiarization delete mode 100644 pyannote/audio/tasks/segmentation/streaming_speaker_diarization.py diff --git a/pyannote/audio/models/segmentation/GuidedPyanNet.py b/pyannote/audio/models/segmentation/GuidedPyanNet.py 
deleted file mode 100644 index 3b84830e0..000000000 --- a/pyannote/audio/models/segmentation/GuidedPyanNet.py +++ /dev/null @@ -1,234 +0,0 @@ -# MIT License -# -# Copyright (c) 2020 CNRS -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - - -from typing import Optional - -import torch -import torch.nn as nn -import torch.nn.functional as F -from einops import rearrange -from pyannote.core.utils.generators import pairwise - -from pyannote.audio.core.model import Model -from pyannote.audio.core.task import Task -from pyannote.audio.models.blocks.sincnet import SincNet -from pyannote.audio.utils.params import merge_dict - - -class GuidedPyanNet(Model): - """Guided PyanNet segmentation model - SincNet > - > LSTM > Feed forward > Classifier - Guide > - Parameters - ---------- - sample_rate : int, optional - Audio sample rate. Defaults to 16kHz (16000). - num_channels : int, optional - Number of channels. Defaults to mono (1). - sincnet : dict, optional - Keyword arugments passed to the SincNet block. - Defaults to {"stride": 1}. - lstm : dict, optional - Keyword arguments passed to the LSTM layer. - Defaults to {"hidden_size": 128, "num_layers": 2, "bidirectional": True}, - i.e. two bidirectional layers with 128 units each. - Set "monolithic" to False to split monolithic multi-layer LSTM into multiple mono-layer LSTMs. - This may proove useful for probing LSTM internals. - linear : dict, optional - Keyword arugments used to initialize linear layers - Defaults to {"hidden_size": 128, "num_layers": 2}, - i.e. two linear layers with 128 units each. 
- """ - - SINCNET_DEFAULTS = {"stride": 10} - LSTM_DEFAULTS = { - "hidden_size": 128, - "num_layers": 2, - "bidirectional": True, - "monolithic": True, - "dropout": 0.0, - } - LINEAR_DEFAULTS = {"hidden_size": 128, "num_layers": 2} - - def __init__( - self, - sincnet: dict = None, - lstm: dict = None, - linear: dict = None, - sample_rate: int = 16000, - num_channels: int = 1, - task: Optional[Task] = None, - ): - - super().__init__(sample_rate=sample_rate, num_channels=num_channels, task=task) - - sincnet = merge_dict(self.SINCNET_DEFAULTS, sincnet) - sincnet["sample_rate"] = sample_rate - lstm = merge_dict(self.LSTM_DEFAULTS, lstm) - lstm["batch_first"] = True - linear = merge_dict(self.LINEAR_DEFAULTS, linear) - self.save_hyperparameters("sincnet", "lstm", "linear") - - self.sincnet = SincNet(**self.hparams.sincnet) - - # monolithic = lstm["monolithic"] - # if monolithic: - # multi_layer_lstm = dict(lstm) - # del multi_layer_lstm["monolithic"] - # self.lstm = nn.LSTM(60 + len(self.specifications.classes), **multi_layer_lstm) - - # else: - # num_layers = lstm["num_layers"] - # if num_layers > 1: - # self.dropout = nn.Dropout(p=lstm["dropout"]) - - # one_layer_lstm = dict(lstm) - # one_layer_lstm["num_layers"] = 1 - # one_layer_lstm["dropout"] = 0.0 - # del one_layer_lstm["monolithic"] - - # self.lstm = nn.ModuleList( - # [ - # nn.LSTM( - # 60 + len(self.specifications.classes) - # if i == 0 - # else lstm["hidden_size"] * (2 if lstm["bidirectional"] else 1), - # **one_layer_lstm - # ) - # for i in range(num_layers) - # ] - # ) - - if linear["num_layers"] < 1: - return - - lstm_out_features: int = self.hparams.lstm["hidden_size"] * ( - 2 if self.hparams.lstm["bidirectional"] else 1 - ) - self.linear = nn.ModuleList( - [ - nn.Linear(in_features, out_features) - for in_features, out_features in pairwise( - [ - lstm_out_features, - ] - + [self.hparams.linear["hidden_size"]] - * self.hparams.linear["num_layers"] - ) - ] - ) - - def build(self): - - lstm = dict(self.hparams.lstm) - - if lstm["monolithic"]: - multi_layer_lstm = dict(lstm) - del multi_layer_lstm["monolithic"] - self.lstm = nn.LSTM( - 60 + self.specifications.num_powerset_classes, **multi_layer_lstm - ) - - else: - num_layers = lstm["num_layers"] - if num_layers > 1: - self.dropout = nn.Dropout(p=lstm["dropout"]) - - one_layer_lstm = dict(lstm) - one_layer_lstm["num_layers"] = 1 - one_layer_lstm["dropout"] = 0.0 - del one_layer_lstm["monolithic"] - - self.lstm = nn.ModuleList( - [ - nn.LSTM( - 60 + self.specifications.num_powerset_classes - if i == 0 - else lstm["hidden_size"] * (2 if lstm["bidirectional"] else 1), - **one_layer_lstm - ) - for i in range(num_layers) - ] - ) - - if self.hparams.linear["num_layers"] > 0: - in_features = self.hparams.linear["hidden_size"] - else: - in_features = self.hparams.lstm["hidden_size"] * ( - 2 if self.hparams.lstm["bidirectional"] else 1 - ) - - if self.specifications.powerset: - out_features = self.specifications.num_powerset_classes - else: - out_features = len(self.specifications.classes) - - self.classifier = nn.Linear(in_features, out_features) - self.activation = self.default_activation() - - def forward( - self, waveforms: torch.Tensor, guide: Optional[torch.Tensor] = None - ) -> torch.Tensor: - """Pass forward - Parameters - ---------- - waveforms : (batch, channel, sample) - guide : (batch, frame, classes), optional - Returns - ------- - scores : (batch, frame, classes) - """ - - outputs = self.sincnet(waveforms) - batch_size, num_features, num_frames = outputs.shape - - # TODO: add 
support for powerset encoding in guide - num_speakers = len(self.specifications.classes) - num_speakers_powerset = self.specifications.num_powerset_classes - if guide is None: - guide = torch.log(torch.full((batch_size,num_frames,num_speakers_powerset), fill_value=1/num_speakers_powerset, device=outputs.device, dtype=outputs.dtype)) - else: - _batch_size, _num_frames, _num_speakers = guide.shape - assert _batch_size == batch_size - assert _num_frames == num_frames - assert _num_speakers == num_speakers_powerset - - guide = rearrange(guide, "batch frame speakers -> batch speakers frame") - outputs = torch.cat([outputs, guide], dim=1) - if self.hparams.lstm["monolithic"]: - outputs, _ = self.lstm( - rearrange(outputs, "batch feature frame -> batch frame feature") - ) - else: - outputs = rearrange(outputs, "batch feature frame -> batch frame feature") - for i, lstm in enumerate(self.lstm): - outputs, _ = lstm(outputs) - if i + 1 < self.hparams.lstm["num_layers"]: - outputs = self.dropout(outputs) - - if self.hparams.linear["num_layers"] > 0: - for linear in self.linear: - outputs = F.leaky_relu(linear(outputs)) - - return self.activation(self.classifier(outputs)) \ No newline at end of file diff --git a/pyannote/audio/models/segmentation/__init__.py b/pyannote/audio/models/segmentation/__init__.py index 10104e3b9..e549f0353 100644 --- a/pyannote/audio/models/segmentation/__init__.py +++ b/pyannote/audio/models/segmentation/__init__.py @@ -22,9 +22,8 @@ from .PyanNet import PyanNet from .SSeRiouSS import SSeRiouSS -from .GuidedPyanNet import GuidedPyanNet from .MultilatencyPyanNet import MultilatencyPyanNet -__all__ = ["PyanNet", "SSeRiouSS", "GuidedPyanNet", "MultilatencyPyanNet"] +__all__ = ["PyanNet", "SSeRiouSS", "MultilatencyPyanNet"] diff --git a/pyannote/audio/pipelines/__init__.py b/pyannote/audio/pipelines/__init__.py index 06554ac4e..0c7d2f25c 100644 --- a/pyannote/audio/pipelines/__init__.py +++ b/pyannote/audio/pipelines/__init__.py @@ -24,7 +24,6 @@ from .overlapped_speech_detection import OverlappedSpeechDetection from .resegmentation import Resegmentation from .speaker_diarization import SpeakerDiarization -from .streaming_speaker_diarization import StreamingSpeakerDiarization from .voice_activity_detection import VoiceActivityDetection __all__ = [ @@ -33,5 +32,4 @@ "SpeakerDiarization", "Resegmentation", "MultiLabelSegmentation", - "StreamingSpeakerDiarization", ] diff --git a/pyannote/audio/pipelines/streaming_speaker_diarization.py b/pyannote/audio/pipelines/streaming_speaker_diarization.py deleted file mode 100644 index 98ed528eb..000000000 --- a/pyannote/audio/pipelines/streaming_speaker_diarization.py +++ /dev/null @@ -1,621 +0,0 @@ -# The MIT License (MIT) -# -# Copyright (c) 2021- CNRS -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -"""Speaker diarization pipelines""" - -import functools -import itertools -import math -from typing import Callable, Optional, Text, Union - -import numpy as np -import torch -from einops import rearrange -from pyannote.core import Annotation, SlidingWindow, SlidingWindowFeature -from pyannote.metrics.diarization import GreedyDiarizationErrorRate -from pyannote.pipeline.parameter import ParamDict, Uniform - -from pyannote.audio import Audio, Inference, Model, Pipeline -from pyannote.audio.core.io import AudioFile -from pyannote.audio.pipelines.clustering import Clustering -from pyannote.audio.pipelines.speaker_verification import PretrainedSpeakerEmbedding -from pyannote.audio.pipelines.utils import ( - PipelineModel, - SpeakerDiarizationMixin, - get_model, -) -from pyannote.audio.utils.signal import binarize - - -def batchify(iterable, batch_size: int = 32, fillvalue=None): - """Batchify iterable""" - # batchify('ABCDEFG', 3) --> ['A', 'B', 'C'] ['D', 'E', 'F'] [G, ] - args = [iter(iterable)] * batch_size - return itertools.zip_longest(*args, fillvalue=fillvalue) - - -class StreamingSpeakerDiarization(SpeakerDiarizationMixin, Pipeline): - """Speaker diarization pipeline - - Parameters - ---------- - segmentation : Model, str, or dict, optional - Pretrained segmentation model. Defaults to "pyannote/segmentation@2022.07". - See pyannote.audio.pipelines.utils.get_model for supported format. - segmentation_step: float, optional - The segmentation model is applied on a window sliding over the whole audio file. - `segmentation_step` controls the step of this window, provided as a ratio of its - duration. Defaults to 0.1 (i.e. 90% overlap between two consecuive windows). - embedding : Model, str, or dict, optional - Pretrained embedding model. Defaults to "pyannote/embedding@2022.07". - See pyannote.audio.pipelines.utils.get_model for supported format. - embedding_exclude_overlap : bool, optional - Exclude overlapping speech regions when extracting embeddings. - Defaults (False) to use the whole speech. - clustering : str, optional - Clustering algorithm. See pyannote.audio.pipelines.clustering.Clustering - for available options. Defaults to "AgglomerativeClustering". - segmentation_batch_size : int, optional - Batch size used for speaker segmentation. Defaults to 1. - embedding_batch_size : int, optional - Batch size used for speaker embedding. Defaults to 1. - der_variant : dict, optional - Optimize for a variant of diarization error rate. - Defaults to {"collar": 0.0, "skip_overlap": False}. This is used in `get_metric` - when instantiating the metric: GreedyDiarizationErrorRate(**der_variant). 
- use_auth_token : str, optional - When loading private huggingface.co models, set `use_auth_token` - to True or to a string containing your hugginface.co authentication - token that can be obtained by running `huggingface-cli login` - - Usage - ----- - # perform (unconstrained) diarization - >>> diarization = pipeline("/path/to/audio.wav") - - # perform diarization, targetting exactly 4 speakers - >>> diarization = pipeline("/path/to/audio.wav", num_speakers=4) - - # perform diarization, with at least 2 speakers and at most 10 speakers - >>> diarization = pipeline("/path/to/audio.wav", min_speakers=2, max_speakers=10) - - # perform diarization and get one representative embedding per speaker - >>> diarization, embeddings = pipeline("/path/to/audio.wav", return_embeddings=True) - >>> for s, speaker in enumerate(diarization.labels()): - ... # embeddings[s] is the embedding of speaker `speaker` - - Hyper-parameters - ---------------- - segmentation.threshold - segmentation.min_duration_off - clustering.??? - """ - - def __init__( - self, - segmentation: PipelineModel = "pyannote/segmentation@2022.07", - segmentation_step: float = 0.1, - embedding: PipelineModel = "speechbrain/spkrec-ecapa-voxceleb@5c0be3875fda05e81f3c004ed8c7c06be308de1e", - embedding_exclude_overlap: bool = False, - clustering: str = "AgglomerativeClustering", - embedding_batch_size: int = 1, - segmentation_batch_size: int = 1, - der_variant: dict = None, - use_auth_token: Union[Text, None] = None, - latency: float = 0.0, - ): - super().__init__() - self.latency = latency - self.segmentation_model = segmentation - model: Model = get_model(segmentation, use_auth_token=use_auth_token) - - self.segmentation_step = segmentation_step - - self.embedding = embedding - self.embedding_batch_size = embedding_batch_size - self.embedding_exclude_overlap = embedding_exclude_overlap - - self.klustering = clustering - - self.der_variant = der_variant or {"collar": 0.0, "skip_overlap": False} - - segmentation_duration = model.specifications.duration - self._segmentation = Inference( - model, - duration=segmentation_duration, - step=self.segmentation_step * segmentation_duration, - skip_aggregation=True, - batch_size=segmentation_batch_size, - ) - self._frames: SlidingWindow = self._segmentation.model.example_output.frames - - if self._segmentation.model.specifications.powerset: - self.segmentation = ParamDict( - min_duration_off=Uniform(0.0, 1.0), - ) - - else: - self.segmentation = ParamDict( - threshold=Uniform(0.1, 0.9), - min_duration_off=Uniform(0.0, 1.0), - ) - - if self.klustering == "OracleClustering": - metric = "not_applicable" - - else: - self._embedding = PretrainedSpeakerEmbedding( - self.embedding, use_auth_token=use_auth_token - ) - self._audio = Audio(sample_rate=self._embedding.sample_rate, mono="downmix") - metric = self._embedding.metric - - try: - Klustering = Clustering[clustering] - except KeyError: - raise ValueError( - f'clustering must be one of [{", ".join(list(Clustering.__members__))}]' - ) - self.clustering = Klustering.value(metric=metric) - - @property - def segmentation_batch_size(self) -> int: - return self._segmentation.batch_size - - @segmentation_batch_size.setter - def segmentation_batch_size(self, batch_size: int): - self._segmentation.batch_size = batch_size - - def default_parameters(self): - raise NotImplementedError() - - def classes(self): - speaker = 0 - while True: - yield f"SPEAKER_{speaker:02d}" - speaker += 1 - - @property - def CACHED_SEGMENTATION(self): - return 
"training_cache/segmentation" - - def get_segmentations(self, file, hook=None) -> SlidingWindowFeature: - """Apply segmentation model - - Parameter - --------- - file : AudioFile - hook : Optional[Callable] - - Returns - ------- - segmentations : (num_chunks, num_frames, num_speakers) SlidingWindowFeature - """ - - if hook is not None: - hook = functools.partial(hook, "segmentation", None) - - if self.training: - if self.CACHED_SEGMENTATION in file: - segmentations = file[self.CACHED_SEGMENTATION] - else: - segmentations = self._segmentation(file, hook=hook) - file[self.CACHED_SEGMENTATION] = segmentations - else: - segmentations: SlidingWindowFeature = self._segmentation(file, hook=hook) - - return segmentations - - def get_embeddings( - self, - file, - binary_segmentations: SlidingWindowFeature, - exclude_overlap: bool = False, - hook: Optional[Callable] = None, - ): - """Extract embeddings for each (chunk, speaker) pair - - Parameters - ---------- - file : AudioFile - binary_segmentations : (num_chunks, num_frames, num_speakers) SlidingWindowFeature - Binarized segmentation. - exclude_overlap : bool, optional - Exclude overlapping speech regions when extracting embeddings. - In case non-overlapping speech is too short, use the whole speech. - hook: Optional[Callable] - Called during embeddings after every batch to report the progress - - Returns - ------- - embeddings : (num_chunks, num_speakers, dimension) array - """ - - # when optimizing the hyper-parameters of this pipeline with frozen - # "segmentation.threshold", one can reuse the embeddings from the first trial, - # bringing a massive speed up to the optimization process (and hence allowing to use - # a larger search space). - if self.training: - # we only re-use embeddings if they were extracted based on the same value of the - # "segmentation.threshold" hyperparameter or if the segmentation model relies on - # `powerset` mode - cache = file.get("training_cache/embeddings", dict()) - if ("embeddings" in cache) and ( - self._segmentation.model.specifications.powerset - or (cache["segmentation.threshold"] == self.segmentation.threshold) - ): - return cache["embeddings"] - - duration = binary_segmentations.sliding_window.duration - num_chunks, num_frames, num_speakers = binary_segmentations.data.shape - - if exclude_overlap: - # minimum number of samples needed to extract an embedding - # (a lower number of samples would result in an error) - min_num_samples = self._embedding.min_num_samples - - # corresponding minimum number of frames - num_samples = duration * self._embedding.sample_rate - min_num_frames = math.ceil(num_frames * min_num_samples / num_samples) - - # zero-out frames with overlapping speech - clean_frames = 1.0 * ( - np.sum(binary_segmentations.data, axis=2, keepdims=True) < 2 - ) - clean_segmentations = SlidingWindowFeature( - binary_segmentations.data * clean_frames, - binary_segmentations.sliding_window, - ) - - else: - min_num_frames = -1 - clean_segmentations = SlidingWindowFeature( - binary_segmentations.data, binary_segmentations.sliding_window - ) - - def iter_waveform_and_mask(): - for (chunk, masks), (_, clean_masks) in zip( - binary_segmentations, clean_segmentations - ): - # chunk: Segment(t, t + duration) - # masks: (num_frames, local_num_speakers) np.ndarray - - waveform, _ = self._audio.crop( - file, - chunk, - duration=duration, - mode="pad", - ) - # waveform: (1, num_samples) torch.Tensor - - # mask may contain NaN (in case of partial stitching) - masks = np.nan_to_num(masks, 
nan=0.0).astype(np.float32) - clean_masks = np.nan_to_num(clean_masks, nan=0.0).astype(np.float32) - - for mask, clean_mask in zip(masks.T, clean_masks.T): - # mask: (num_frames, ) np.ndarray - - if np.sum(clean_mask) > min_num_frames: - used_mask = clean_mask - else: - used_mask = mask - - yield waveform[None], torch.from_numpy(used_mask)[None] - # w: (1, 1, num_samples) torch.Tensor - # m: (1, num_frames) torch.Tensor - - batches = batchify( - iter_waveform_and_mask(), - batch_size=self.embedding_batch_size, - fillvalue=(None, None), - ) - - batch_count = math.ceil(num_chunks * num_speakers / self.embedding_batch_size) - - embedding_batches = [] - - if hook is not None: - hook("embeddings", None, total=batch_count, completed=0) - - for i, batch in enumerate(batches, 1): - waveforms, masks = zip(*filter(lambda b: b[0] is not None, batch)) - - waveform_batch = torch.vstack(waveforms) - # (batch_size, 1, num_samples) torch.Tensor - - mask_batch = torch.vstack(masks) - # (batch_size, num_frames) torch.Tensor - - embedding_batch: np.ndarray = self._embedding( - waveform_batch, masks=mask_batch - ) - # (batch_size, dimension) np.ndarray - - embedding_batches.append(embedding_batch) - - if hook is not None: - hook("embeddings", embedding_batch, total=batch_count, completed=i) - - embedding_batches = np.vstack(embedding_batches) - - embeddings = rearrange(embedding_batches, "(c s) d -> c s d", c=num_chunks) - - # caching embeddings for subsequent trials - # (see comments at the top of this method for more details) - if self.training: - if self._segmentation.model.specifications.powerset: - file["training_cache/embeddings"] = { - "embeddings": embeddings, - } - else: - file["training_cache/embeddings"] = { - "segmentation.threshold": self.segmentation.threshold, - "embeddings": embeddings, - } - - return embeddings - - def reconstruct( - self, - segmentations: SlidingWindowFeature, - hard_clusters: np.ndarray, - count: SlidingWindowFeature, - ) -> SlidingWindowFeature: - """Build final discrete diarization out of clustered segmentation - - Parameters - ---------- - segmentations : (num_chunks, num_frames, num_speakers) SlidingWindowFeature - Raw speaker segmentation. - hard_clusters : (num_chunks, num_speakers) array - Output of clustering step. - count : (total_num_frames, 1) SlidingWindowFeature - Instantaneous number of active speakers. - - Returns - ------- - discrete_diarization : SlidingWindowFeature - Discrete (0s and 1s) diarization. - """ - - num_chunks, num_frames, local_num_speakers = segmentations.data.shape - - num_clusters = np.max(hard_clusters) + 1 - clustered_segmentations = np.NAN * np.zeros( - (num_chunks, num_frames, num_clusters) - ) - - for c, (cluster, (chunk, segmentation)) in enumerate( - zip(hard_clusters, segmentations) - ): - # cluster is (local_num_speakers, )-shaped - # segmentation is (num_frames, local_num_speakers)-shaped - for k in np.unique(cluster): - if k == -2: - continue - - # TODO: can we do better than this max here? 
- clustered_segmentations[c, :, k] = np.max( - segmentation[:, cluster == k], axis=1 - ) - - clustered_segmentations = SlidingWindowFeature( - clustered_segmentations, segmentations.sliding_window - ) - - return self.to_diarization(clustered_segmentations, count) - - def apply( - self, - file: AudioFile, - num_speakers: int = None, - min_speakers: int = None, - max_speakers: int = None, - return_embeddings: bool = False, - hook: Optional[Callable] = None, - ) -> Annotation: - """Apply speaker diarization - - Parameters - ---------- - file : AudioFile - Processed file. - num_speakers : int, optional - Number of speakers, when known. - min_speakers : int, optional - Minimum number of speakers. Has no effect when `num_speakers` is provided. - max_speakers : int, optional - Maximum number of speakers. Has no effect when `num_speakers` is provided. - return_embeddings : bool, optional - Return representative speaker embeddings. - hook : callable, optional - Callback called after each major steps of the pipeline as follows: - hook(step_name, # human-readable name of current step - step_artefact, # artifact generated by current step - file=file) # file being processed - Time-consuming steps call `hook` multiple times with the same `step_name` - and additional `completed` and `total` keyword arguments usable to track - progress of current step. - - Returns - ------- - diarization : Annotation - Speaker diarization - embeddings : np.array, optional - Representative speaker embeddings such that `embeddings[i]` is the - speaker embedding for i-th speaker in diarization.labels(). - Only returned when `return_embeddings` is True. - """ - - # setup hook (e.g. for debugging purposes) - hook = self.setup_hook(file, hook=hook) - - num_speakers, min_speakers, max_speakers = self.set_num_speakers( - num_speakers=num_speakers, - min_speakers=min_speakers, - max_speakers=max_speakers, - ) - - segmentations = self.get_segmentations(file, hook=hook) - - # change the sliding window and shift the data of segmentations according to latency - sliding_window = segmentations.sliding_window - new_sliding_window = SlidingWindow(start=sliding_window.start, end = sliding_window.end, step = sliding_window.step, duration=sliding_window.duration-self.latency) - segmentations.sliding_window = new_sliding_window - segmentations.data = segmentations.data[:,self._frames.closest_frame(self.latency):,:] - print(self._frames.closest_frame(self.latency)) - - hook("segmentation", segmentations) - # shape: (num_chunks, num_frames, local_num_speakers) - - # estimate frame-level number of instantaneous speakers - count = self.speaker_count( - segmentations, - onset=0.5 - if self._segmentation.model.specifications.powerset - else self.segmentation.threshold, - frames=self._frames, - warm_up=(0.0, 0.0), - ) - - print(count.data.shape) - - hook("speaker_counting", count) - # shape: (num_frames, 1) - # dtype: int - - # exit early when no speaker is ever active - if np.nanmax(count.data) == 0.0: - diarization = Annotation(uri=file["uri"]) - if return_embeddings: - return diarization, np.zeros((0, self._embedding.dimension)) - - return diarization - - # binarize segmentation - if self._segmentation.model.specifications.powerset: - binarized_segmentations = segmentations - else: - binarized_segmentations: SlidingWindowFeature = binarize( - segmentations, - onset=self.segmentation.threshold, - initial_state=False, - ) - - if self.klustering == "OracleClustering" and not return_embeddings: - embeddings = None - else: - print("before embedding") 
- embeddings = self.get_embeddings( - file, - binarized_segmentations, - exclude_overlap=self.embedding_exclude_overlap, - hook=hook, - ) - hook("embeddings", embeddings) - # shape: (num_chunks, local_num_speakers, dimension) - print("before clustering") - hard_clusters, _, centroids = self.clustering( - embeddings=embeddings, - segmentations=binarized_segmentations, - num_clusters=num_speakers, - min_clusters=min_speakers, - max_clusters=max_speakers, - file=file, # <== for oracle clustering - frames=self._frames, # <== for oracle clustering - ) - # hard_clusters: (num_chunks, num_speakers) - # centroids: (num_speakers, dimension) - - # reconstruct discrete diarization from raw hard clusters - - # keep track of inactive speakers - inactive_speakers = np.sum(binarized_segmentations.data, axis=1) == 0 - # shape: (num_chunks, num_speakers) - - hard_clusters[inactive_speakers] = -2 - - print("before reconstruct") - discrete_diarization = self.reconstruct( - segmentations, - hard_clusters, - count, - ) - hook("discrete_diarization", discrete_diarization) - - # convert to continuous diarization - diarization = self.to_annotation( - discrete_diarization, - min_duration_on=0.0, - min_duration_off=self.segmentation.min_duration_off, - ) - diarization.uri = file["uri"] - - # at this point, `diarization` speaker labels are integers - # from 0 to `num_speakers - 1`, aligned with `centroids` rows. - - if "annotation" in file and file["annotation"]: - # when reference is available, use it to map hypothesized speakers - # to reference speakers (this makes later error analysis easier - # but does not modify the actual output of the diarization pipeline) - _, mapping = self.optimal_mapping( - file["annotation"], diarization, return_mapping=True - ) - - # in case there are more speakers in the hypothesis than in - # the reference, those extra speakers are missing from `mapping`. - # we add them back here - mapping = {key: mapping.get(key, key) for key in diarization.labels()} - - else: - # when reference is not available, rename hypothesized speakers - # to human-readable SPEAKER_00, SPEAKER_01, ... - mapping = { - label: expected_label - for label, expected_label in zip(diarization.labels(), self.classes()) - } - - diarization = diarization.rename_labels(mapping=mapping) - - # at this point, `diarization` speaker labels are strings (or mix of - # strings and integers when reference is available and some hypothesis - # speakers are not present in the reference) - - if not return_embeddings: - return diarization - - # re-order centroids so that they match - # the order given by diarization.labels() - inverse_mapping = {label: index for index, label in mapping.items()} - centroids = centroids[ - [inverse_mapping[label] for label in diarization.labels()] - ] - - # FIXME: the number of centroids may be smaller than the number of speakers - # in the annotation. This can happen if the number of active speakers - # obtained from `speaker_count` for some frames is larger than the number - # of clusters obtained from `clustering`. 
Will be fixed in the future - - return diarization, centroids - - def get_metric(self) -> GreedyDiarizationErrorRate: - return GreedyDiarizationErrorRate(**self.der_variant) diff --git a/pyannote/audio/tasks/__init__.py b/pyannote/audio/tasks/__init__.py index ddce17d71..814b3e5ce 100644 --- a/pyannote/audio/tasks/__init__.py +++ b/pyannote/audio/tasks/__init__.py @@ -22,7 +22,6 @@ from .segmentation.multilabel import MultiLabelSegmentation # isort:skip from .segmentation.speaker_diarization import SpeakerDiarization # isort:skip -from .segmentation.streaming_speaker_diarization import StreamingSpeakerDiarization # isort:skip from .segmentation.multilatency_speaker_diarization import MultilatencySpeakerDiarization # isort:skip from .segmentation.voice_activity_detection import VoiceActivityDetection # isort:skip @@ -39,7 +38,6 @@ __all__ = [ "SpeakerDiarization", - "StreamingSpeakerDiarization", "VoiceActivityDetection", "OverlappedSpeechDetection", "MultiLabelSegmentation", diff --git a/pyannote/audio/tasks/segmentation/GuidedSpeakerDiarization b/pyannote/audio/tasks/segmentation/GuidedSpeakerDiarization deleted file mode 100644 index e2c4c1771..000000000 --- a/pyannote/audio/tasks/segmentation/GuidedSpeakerDiarization +++ /dev/null @@ -1,992 +0,0 @@ -# MIT License -# -# Copyright (c) 2020- CNRS -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
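# The apply() method shown earlier in this patch renames integer speaker labels to
# human-readable names (SPEAKER_00, SPEAKER_01, ...) when no reference annotation is
# available. A minimal sketch of that renaming with pyannote.core on a toy annotation;
# the exact label format is illustrative.
from pyannote.core import Annotation, Segment

diarization = Annotation(uri="toy")
diarization[Segment(0.0, 1.0)] = 0
diarization[Segment(1.5, 3.0)] = 1
diarization[Segment(3.5, 4.0)] = 0

mapping = {
    label: f"SPEAKER_{index:02d}"
    for index, label in enumerate(diarization.labels())
}
diarization = diarization.rename_labels(mapping=mapping)
# diarization.labels() is now ['SPEAKER_00', 'SPEAKER_01']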
- -import math -import random -import itertools -from typing import Dict, Optional, Sequence, Text, Union -from torch.utils.data._utils.collate import default_collate - -import numpy as np -import torch -import torch.nn.functional -from collections import defaultdict -from matplotlib import pyplot as plt -from pyannote.core import Segment, SlidingWindowFeature - -from pyannote.database.protocol import SegmentationProtocol, SpeakerDiarizationProtocol -from pyannote.database.protocol.protocol import Scope, Subset -from pytorch_lightning.loggers import MLFlowLogger, TensorBoardLogger -from torch_audiomentations import OneOf -from torch_audiomentations.core.transforms_interface import BaseWaveformTransform -from torch_audiomentations.utils.object_dict import ObjectDict -from torchmetrics import Metric - -from pyannote.audio.core.task import Problem, Resolution, Specifications, Task -from pyannote.audio.tasks.segmentation.mixins import SegmentationTaskMixin -from pyannote.audio.torchmetrics import ( - DiarizationErrorRate, - FalseAlarmRate, - MissedDetectionRate, - OptimalDiarizationErrorRate, - OptimalDiarizationErrorRateThreshold, - OptimalFalseAlarmRate, - OptimalMissedDetectionRate, - OptimalSpeakerConfusionRate, - SpeakerConfusionRate, -) -from pyannote.audio.utils.loss import nll_loss -from pyannote.audio.utils.permutation import permutate -from pyannote.audio.utils.powerset import Powerset - -Subsets = list(Subset.__args__) -Scopes = list(Scope.__args__) - - -class GuidedSpeakerDiarization(SegmentationTaskMixin, Task): - """Guided speaker diarization - Parameters - ---------- - protocol : SpeakerDiarizationProtocol - pyannote.database protocol - duration : float, optional - Chunks duration. Defaults to 10s. - max_speakers_per_chunk : int, optional - Maximum number of speakers per chunk. Defaults to 3. - max_speakers_per_frame : int, optional - Maximum number of (overlapping) speakers per frame. Defaults to 2. - balance: str, optional - When provided, training samples are sampled uniformly with respect to that key. - For instance, setting `balance` to "database" will make sure that each database - will be equally represented in the training samples. - freedom : float, optional - Controls how much freedom the model is allowed regarding the provided guide. - 0.0 means that the model is forced to follow the guide exactly. - 1.0 means that the model is free to ignore the guide completely. - Defaults to 0.5. - batch_size : int, optional - Number of training samples per batch. Defaults to 32. - num_workers : int, optional - Number of workers used for generating training samples. - Defaults to multiprocessing.cpu_count() // 2. - pin_memory : bool, optional - If True, data loaders will copy tensors into CUDA pinned - memory before returning them. See pytorch documentation - for more details. Defaults to False. - augmentation : BaseWaveformTransform, optional - torch_audiomentations waveform transform, used by dataloader - during training. - metric : optional - Validation metric(s). Can be anything supported by torchmetrics.MetricCollection. - Defaults to AUROC (area under the ROC curve). 
- """ - - def __init__( - self, - protocol: SpeakerDiarizationProtocol, - duration: float = 10.0, - max_speakers_per_chunk: int = 3, - max_speakers_per_frame: int = 2, - balance: Text = None, - freedom: float = 0.5, - batch_size: int = 32, - num_workers: int = None, - pin_memory: bool = False, - augmentation: BaseWaveformTransform = None, - metric: Union[Metric, Sequence[Metric], Dict[str, Metric]] = None, - step: float = None, - ): - super().__init__( - protocol, - duration=duration, - batch_size=batch_size, - num_workers=num_workers, - pin_memory=pin_memory, - augmentation=augmentation, - metric=metric, - ) - - if not isinstance(protocol, SpeakerDiarizationProtocol): - raise ValueError( - "SpeakerDiarization task requires a SpeakerDiarizationProtocol." - ) - - self.max_speakers_per_chunk = max_speakers_per_chunk - self.max_speakers_per_frame = max_speakers_per_frame - self.balance = balance - self.freedom = freedom - if step == None: - self.step = self.duration / 10 - else: - self.step = step - - self.specifications = Specifications( - problem=Problem.MONO_LABEL_CLASSIFICATION, - resolution=Resolution.FRAME, - duration=self.duration, - classes=[f"speaker#{i+1}" for i in range(self.max_speakers_per_chunk)], - powerset_max_classes=self.max_speakers_per_frame, - permutation_invariant=True, - ) - - def setup_loss_func(self): - self.model.powerset = Powerset( - len(self.specifications.classes), - self.specifications.powerset_max_classes, - ) - - def setup(self): - """Setup""" - - # duration of training chunks - # TODO: handle variable duration case - duration = getattr(self, "duration", 0.0) - - # list of possible values for each metadata key - metadata_unique_values = defaultdict(list) - - metadata_unique_values["subset"] = Subsets - - if isinstance(self.protocol, SpeakerDiarizationProtocol): - metadata_unique_values["scope"] = Scopes - - elif isinstance(self.protocol, SegmentationProtocol): - classes = getattr(self, "classes", list()) - - # make sure classes attribute exists (and set to None if it did not exist) - self.classes = getattr(self, "classes", None) - if self.classes is None: - classes = list() - # metadata_unique_values["classes"] = list(classes) - - audios = list() # list of path to audio files - audio_infos = list() - audio_encodings = list() - metadata = list() # list of metadata - - annotated_duration = list() # total duration of annotated regions (per file) - annotated_regions = list() # annotated regions - annotations = list() # actual annotations - annotated_classes = list() # list of annotated classes (per file) - unique_labels = list() - - if self.has_validation: - files_iter = itertools.chain( - self.protocol.train(), self.protocol.development() - ) - else: - files_iter = self.protocol.train() - - for file_id, file in enumerate(files_iter): - # gather metadata and update metadata_unique_values so that each metadatum - # (e.g. source database or label) is represented by an integer. 
- metadatum = dict() - - # keep track of source database and subset (train, development, or test) - if file["database"] not in metadata_unique_values["database"]: - metadata_unique_values["database"].append(file["database"]) - metadatum["database"] = metadata_unique_values["database"].index( - file["database"] - ) - metadatum["subset"] = Subsets.index(file["subset"]) - - # keep track of speaker label scope (file, database, or global) for speaker diarization protocols - if isinstance(self.protocol, SpeakerDiarizationProtocol): - metadatum["scope"] = Scopes.index(file["scope"]) - - # keep track of list of classes for regular segmentation protocols - # Different files may be annotated using a different set of classes - # (e.g. one database for speech/music/noise, and another one for male/female/child) - if isinstance(self.protocol, SegmentationProtocol): - if "classes" in file: - local_classes = file["classes"] - else: - local_classes = file["annotation"].labels() - - # if task was not initialized with a fixed list of classes, - # we build it as the union of all classes found in files - if self.classes is None: - for klass in local_classes: - if klass not in classes: - classes.append(klass) - annotated_classes.append( - [classes.index(klass) for klass in local_classes] - ) - - # if task was initialized with a fixed list of classes, - # we make sure that all files use a subset of these classes - # if they don't, we issue a warning and ignore the extra classes - else: - extra_classes = set(local_classes) - set(self.classes) - if extra_classes: - warnings.warn( - f"Ignoring extra classes ({', '.join(extra_classes)}) found for file {file['uri']} ({file['database']}). " - ) - annotated_classes.append( - [ - self.classes.index(klass) - for klass in set(local_classes) & set(self.classes) - ] - ) - - remaining_metadata_keys = set(file) - set( - [ - "uri", - "database", - "subset", - "audio", - "torchaudio.info", - "scope", - "classes", - "annotation", - "annotated", - ] - ) - - # keep track of any other (integer or string) metadata provided by the protocol - # (e.g. a "domain" key for domain-adversarial training) - for key in remaining_metadata_keys: - value = file[key] - - if isinstance(value, str): - if value not in metadata_unique_values[key]: - metadata_unique_values[key].append(value) - metadatum[key] = metadata_unique_values[key].index(value) - - elif isinstance(value, int): - metadatum[key] = value - - else: - warnings.warn( - f"Ignoring '{key}' metadata because of its type ({type(value)}). 
Only str and int are supported for now.", - category=UserWarning, - ) - - metadata.append(metadatum) - - database_unique_labels = list() - - # reset list of file-scoped labels - file_unique_labels = list() - - # path to audio file - audios.append(str(file["audio"])) - - # audio info - audio_info = file["torchaudio.info"] - audio_infos.append( - ( - audio_info.sample_rate, # sample rate - audio_info.num_frames, # number of frames - audio_info.num_channels, # number of channels - audio_info.bits_per_sample, # bits per sample - ) - ) - audio_encodings.append(audio_info.encoding) # encoding - - # annotated regions and duration - _annotated_duration = 0.0 - for segment in file["annotated"]: - # skip annotated regions that are shorter than training chunk duration - if segment.duration < duration: - continue - - # append annotated region - annotated_region = ( - file_id, - segment.duration, - segment.start, - segment.end, - ) - annotated_regions.append(annotated_region) - - # increment annotated duration - _annotated_duration += segment.duration - - # append annotated duration - annotated_duration.append(_annotated_duration) - - # annotations - for segment, _, label in file["annotation"].itertracks(yield_label=True): - # "scope" is provided by speaker diarization protocols to indicate - # whether speaker labels are local to the file ('file'), consistent across - # all files in a database ('database'), or globally consistent ('global') - - if "scope" in file: - # 0 = 'file' - # 1 = 'database' - # 2 = 'global' - scope = Scopes.index(file["scope"]) - - # update list of file-scope labels - if label not in file_unique_labels: - file_unique_labels.append(label) - # and convert label to its (file-scope) index - file_label_idx = file_unique_labels.index(label) - - database_label_idx = global_label_idx = -1 - - if scope > 0: # 'database' or 'global' - # update list of database-scope labels - if label not in database_unique_labels: - database_unique_labels.append(label) - - # and convert label to its (database-scope) index - database_label_idx = database_unique_labels.index(label) - - if scope > 1: # 'global' - # update list of global-scope labels - if label not in unique_labels: - unique_labels.append(label) - # and convert label to its (global-scope) index - global_label_idx = unique_labels.index(label) - - # basic segmentation protocols do not provide "scope" information - # as classes are global by definition - - else: - try: - file_label_idx = ( - database_label_idx - ) = global_label_idx = classes.index(label) - except ValueError: - # skip labels that are not in the list of classes - continue - - annotations.append( - ( - file_id, # index of file - segment.start, # start time - segment.end, # end time - file_label_idx, # file-scope label index - database_label_idx, # database-scope label index - global_label_idx, # global-scope index - ) - ) - - # since not all metadata keys are present in all files, fallback to -1 when a key is missing - metadata = [ - tuple(metadatum.get(key, -1) for key in metadata_unique_values) - for metadatum in metadata - ] - dtype = [(key, "i") for key in metadata_unique_values] - self.metadata = np.array(metadata, dtype=dtype) - - # NOTE: read with str(self.audios[file_id], encoding='utf-8') - self.audios = np.array(audios, dtype=np.string_) - - # turn list of files metadata into a single numpy array - # TODO: improve using https://github.com/pytorch/pytorch/issues/13246#issuecomment-617140519 - - dtype = [ - ("sample_rate", "i"), - ("num_frames", "i"), - ("num_channels", 
"i"), - ("bits_per_sample", "i"), - ] - self.audio_infos = np.array(audio_infos, dtype=dtype) - self.audio_encodings = np.array(audio_encodings, dtype=np.string_) - - self.annotated_duration = np.array(annotated_duration) - - # turn list of annotated regions into a single numpy array - dtype = [("file_id", "i"), ("duration", "f"), ("start", "f"), ("end", "f")] - self.annotated_regions = np.array(annotated_regions, dtype=dtype) - - # convert annotated_classes (which is a list of list of classes, one list of classes per file) - # into a single (num_files x num_classes) numpy array: - # * True indicates that this particular class was annotated for this particular file (though it may not be active in this file) - # * False indicates that this particular class was not even annotated (i.e. its absence does not imply that it is not active in this file) - if isinstance(self.protocol, SegmentationProtocol) and self.classes is None: - self.classes = classes - self.annotated_classes = np.zeros( - (len(annotated_classes), len(self.classes)), dtype=np.bool_ - ) - for file_id, classes in enumerate(annotated_classes): - self.annotated_classes[file_id, classes] = True - - # turn list of annotations into a single numpy array - dtype = [ - ("file_id", "i"), - ("start", "f"), - ("end", "f"), - ("file_label_idx", "i"), - ("database_label_idx", "i"), - ("global_label_idx", "i"), - ] - self.annotations = np.array(annotations, dtype=dtype) - - self.metadata_unique_values = metadata_unique_values - - if not self.has_validation: - return - - validation_chunks = list() - - # obtain indexes of files in the validation subset - validation_file_ids = np.where( - self.metadata["subset"] == Subsets.index("development") - )[0] - - # iterate over files in the validation subset - for file_id in validation_file_ids: - # get annotated regions in file - annotated_regions = self.annotated_regions[ - self.annotated_regions["file_id"] == file_id - ] - - # iterate over annotated regions - for annotated_region in annotated_regions: - # number of chunks in annotated region - num_chunks = round((annotated_region["duration"] - self.duration) // self.step) - print(annotated_region["duration"]) - print(num_chunks) - - # iterate over chunks - for c in range(num_chunks//8): - start_time = annotated_region["start"] + c * self.step - validation_chunks.append((file_id, start_time, duration)) - - dtype = [("file_id", "i"), ("start", "f"), ("duration", "f")] - self.validation_chunks = np.array(validation_chunks, dtype=dtype) - - - def prepare_chunk(self, file_id: int, start_time: float, duration: float, number: int = 2): - """Prepare chunk - Parameters - ---------- - file_id : int - File index - start_time : float - Chunk start time - duration : float - Chunk duration. - Returns - ------- - sample : dict - Dictionary containing the chunk data with the following keys: - - `X`: waveform - - `y`: target as a SlidingWindowFeature instance where y.labels is - in meta.scope space. 
- - `meta`: - - `scope`: target scope (0: file, 1: database, 2: global) - - `database`: database index - - `file`: file index - """ - - file = self.get_file(file_id) - # get label scope - - label_scope = Scopes[self.metadata[file_id]["scope"]] - label_scope_key = f"{label_scope}_label_idx" - - # - chunk = Segment(start_time, start_time + duration) - - sample = dict() - sample["X"], _ = self.model.audio.crop(file, chunk, duration=duration) - - # gather all annotations of current file - annotations = self.annotations[self.annotations["file_id"] == file_id] - - # gather all annotations with non-empty intersection with current chunk - chunk_annotations = annotations[ - (annotations["start"] < chunk.end) & (annotations["end"] > chunk.start) - ] - - # discretize chunk annotations at model output resolution - start = np.maximum(chunk_annotations["start"], chunk.start) - chunk.start - start_idx = np.floor(start / self.model.example_output.frames.step).astype( - int - ) - end = np.minimum(chunk_annotations["end"], chunk.end) - chunk.start - end_idx = np.ceil(end / self.model.example_output.frames.step).astype(int) - - # get list and number of labels for current scope - labels = list(np.unique(chunk_annotations[label_scope_key])) - num_labels = len(labels) - - if num_labels > self.max_speakers_per_chunk: - pass - - # initial frame-level targets - num_chunks_per_file = getattr(self, "num_chunks_per_file", 1) - y = np.zeros((self.model.example_output.num_frames, num_labels), dtype=np.uint8) - - # map labels to indices - mapping = {label: idx for idx, label in enumerate(labels)} - - for start, end, label in zip( - start_idx, end_idx, chunk_annotations[label_scope_key] - ): - mapped_label = mapping[label] - y[start:end, mapped_label] = 1 - - sample["y"] = SlidingWindowFeature( - y, self.model.example_output.frames, labels=labels - ) - - metadata = self.metadata[file_id] - sample["meta"] = {key: metadata[key] for key in metadata.dtype.names} - sample["meta"]["file"] = file_id - - sample["number"] = number - - return sample - - def collate_y(self, batch) -> torch.Tensor: - """ - Parameters - ---------- - batch : list - List of samples to collate. - "y" field is expected to be a SlidingWindowFeature. - Returns - ------- - y : torch.Tensor - Collated target tensor of shape (batch_size, num_frames, self.max_speakers_per_chunk) - If one chunk has more than `self.max_speakers_per_chunk` speakers, we keep - the max_speakers_per_chunk most talkative ones. If it has less, we pad with - zeros (artificial inactive speakers). - """ - - collated_y = [] - for b in batch: - y = b["y"].data - num_speakers = len(b["y"].labels) - - # keep only the most talkative speakers - if num_speakers > self.max_speakers_per_chunk: - # sort speakers in descending talkativeness order - indices = np.argsort(-np.sum(y, axis=0), axis=0) - y = y[:, indices[: self.max_speakers_per_chunk]] - - elif num_speakers < self.max_speakers_per_chunk: - # create inactive speakers by zero padding - y = np.pad( - y, - ((0, 0), (0, self.max_speakers_per_chunk - num_speakers)), - mode="constant", - ) - - else: - # we have exactly the right number of speakers - pass - - # shuffle speaker indices (to avoid having them sorted in talkativeness decreasing order) as - # the model might otherwise infer prior probabilities from the order of the speakers. we do - # not want this information (partly computed from the second half of the chunk) to leak. 
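# The collate_y() logic above keeps the max_speakers_per_chunk most talkative
# speakers and zero-pads when fewer are present. A self-contained sketch of that
# trim-or-pad step on a toy (num_frames, num_speakers) target, assuming the same
# convention; shapes are illustrative only.
import numpy as np

max_speakers_per_chunk = 3
y = np.random.randint(0, 2, size=(100, 5))   # 5 annotated speakers in this chunk

num_speakers = y.shape[1]
if num_speakers > max_speakers_per_chunk:
    # sort speakers by decreasing total activity and keep the most talkative ones
    order = np.argsort(-y.sum(axis=0))
    y = y[:, order[:max_speakers_per_chunk]]
elif num_speakers < max_speakers_per_chunk:
    # pad with artificial, always-inactive speakers
    y = np.pad(y, ((0, 0), (0, max_speakers_per_chunk - num_speakers)))

assert y.shape == (100, max_speakers_per_chunk)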
- np.random.shuffle(y.T) - - collated_y.append(y) - - return torch.from_numpy(np.stack(collated_y)) - - def collate_number(self, batch) -> torch.Tensor: - return default_collate([b["number"] for b in batch]) - - - def collate_fn(self, batch, stage="train"): - # collate X - collated_X = self.collate_X(batch) - - # collate y - collated_y = self.collate_y(batch) - - # collate metadata - collated_meta = self.collate_meta(batch) - - collated_number = self.collate_number(batch) - - - # apply augmentation (only in "train" stage) - self.augmentation.train(mode=(stage == "train")) - augmented = self.augmentation( - samples=collated_X, - sample_rate=self.model.hparams.sample_rate, - targets=collated_y.unsqueeze(1), - ) - - return { - "X": augmented.samples, - "y": augmented.targets.squeeze(1), - "meta": collated_meta, - "number": collated_number - } - - def segmentation_loss( - self, - permutated_prediction: torch.Tensor, - target: torch.Tensor, - weight: torch.Tensor = None, - ) -> torch.Tensor: - """Permutation-invariant segmentation loss - Parameters - ---------- - permutated_prediction : (batch_size, num_frames, num_classes) torch.Tensor - Permutated speaker activity predictions. - target : (batch_size, num_frames, num_speakers) torch.Tensor - Speaker activity. - weight : (batch_size, num_frames, 1) torch.Tensor, optional - Frames weight. - Returns - -------freedom - seg_loss : torch.Tensor - Permutation-invariant segmentation loss - """ - - return nll_loss( - permutated_prediction, - torch.argmax(target, dim=-1), - weight=weight, - ) - - def train__iter__helper(self, rng: random.Random, **filters): - """Iterate over training samples with optional domain filtering - - Parameters - ---------- - rng : random.Random - Random number generator - filters : dict, optional - When provided (as {key: value} dict), filter training files so that - only files such as file[key] == value are used for generating chunks. - - Yields - ------ - chunk : dict - Training chunks. 
- """ - - # indices of training files that matches domain filters - training = self.metadata["subset"] == Subsets.index("train") - for key, value in filters.items(): - training &= self.metadata[key] == self.metadata_unique_values[key].index(value) - file_ids = np.where(training)[0] - - # turn annotated duration into a probability distribution - annotated_duration = self.annotated_duration[file_ids] - prob_annotated_duration = annotated_duration / np.sum(annotated_duration) - - duration = self.duration - - num_chunks_per_file = getattr(self, "num_chunks_per_file", 1) - while True: - # select one file at random (with probability proportional to its annotated duration) - file_id = np.random.choice(file_ids, p=prob_annotated_duration) - - # generate `num_chunks_per_file` chunks from this file - for _ in range(num_chunks_per_file): - # find indices of annotated regions in this file - annotated_region_indices = np.where( - self.annotated_regions["file_id"] == file_id - )[0] - - # turn annotated regions duration into a probability distribution - prob_annotated_regions_duration = self.annotated_regions["duration"][ - annotated_region_indices - ] / np.sum(self.annotated_regions["duration"][annotated_region_indices]) - - # selected one annotated region at random (with probability proportional to its duration) - annotated_region_index = np.random.choice( - annotated_region_indices, p=prob_annotated_regions_duration - ) - - # select one chunk at random in this annotated region - _, _, start, end = self.annotated_regions[annotated_region_index] - start_time = rng.uniform(start, end - duration - self.step) - - yield self.prepare_chunk(file_id, start_time, duration,0) - yield self.prepare_chunk(file_id, start_time + self.step, duration,1) - - - def training_step(self, batch, batch_idx: int): - """Compute permutation-invariant segmentation loss - Parameters - ---------- - batch : (usually) dict of torch.Tensor - Current batch. - batch_idx: int - Batch index. 
- Returns - ------- - loss : {str: torch.tensor} - {"loss": loss} - """ - # target - target_multilabel = batch["y"] - # (batch_size, num_frames, num_speakers) - - waveform = batch["X"] - # (batch_size, num_channels, num_samples) - - # drop samples that contain too many speakers - num_speakers: torch.Tensor = torch.sum( - torch.any(target_multilabel, dim=1), dim=1 - ) - keep: torch.Tensor = num_speakers <= self.max_speakers_per_chunk - target_multilabel = target_multilabel[keep] - waveform = waveform[keep] - - # corner case - if not keep.any(): - return {"loss": 0.0} - - target_powerset = self.model.powerset.to_powerset(target_multilabel.float()) - batch_size = target_powerset.size(0) - num_frames = target_powerset.size(1) - num_speakers = target_powerset.size(2) - - - - #create the guide with the same size as the targets (log because predictions are also log) - guide = torch.log(torch.full(target_powerset.size(), fill_value=1/num_speakers, device=target_multilabel.device)) - guide_length = self.duration - self.step - guide_length = int(np.floor(num_frames * guide_length / self.duration)) # round down - - #No guide forward pass - no_guide_predictions_powerset = self.model(waveform) - # permutate target in multilabel space and convert it to powerset space - no_guide_predictions_multilabel = self.model.powerset.to_multilabel(no_guide_predictions_powerset) - permutated_target_multilabel, _ = permutate(no_guide_predictions_multilabel, target_multilabel) - permutated_target_powerset = self.model.powerset.to_powerset(permutated_target_multilabel.float()) - - # compute loss in powerset space (between soft prediction and permutated target) - no_guide_loss = self.segmentation_loss(no_guide_predictions_powerset, permutated_target_powerset) - - #Even forward pass when initializing - #We associate every even batch elements with a guide from the targets - guide[0:batch_size:2, :guide_length,:] = target_powerset[0:batch_size:2, :guide_length,:] - even_predictions_powerset = self.model(waveform[0:batch_size:2], guide=guide[0:batch_size:2]) - # permutate target in multilabel space and # compute loss in powerset space - even_predictions_multilabel = self.model.powerset.to_multilabel(even_predictions_powerset) - permutated_target_multilabel, _ = permutate(even_predictions_multilabel, target_multilabel[1:batch_size:2]) - permutated_target_powerset = self.model.powerset.to_powerset(permutated_target_multilabel.float()) - - # compute loss for the last frames (where there is no target guide) - end_even_loss = self.segmentation_loss(even_predictions_powerset[:, guide_length:, :], permutated_target_powerset[:, guide_length:, :]) - - #Odd forward pass - #We associate every odd batch elements with a guide from the even predictions (here no guide even predictions but could be from target-guided even predictions) - guide[1:batch_size:2, :guide_length,:] = no_guide_predictions_powerset[0:batch_size:2, num_frames-guide_length:, :] - odd_predictions_powerset = self.model(waveform[1:batch_size:2], guide=guide[1:batch_size:2]) - # permutate target in multilabel space and convert it to powerset space - odd_predictions_multilabel = self.model.powerset.to_multilabel(odd_predictions_powerset) - permutated_target_multilabel, _ = permutate(odd_predictions_multilabel, target_multilabel[1:batch_size:2]) - permutated_target_powerset = self.model.powerset.to_powerset(permutated_target_multilabel.float()) - - # compute loss in powerset space (between soft prediction and permutated target) - guided_loss = 
self.segmentation_loss(odd_predictions_powerset, permutated_target_powerset) - - - #Now we stack the even and the odd predictions - # soft_prediction_powerset = torch.stack([even_prediction_powerset, odd_prediction_powerset], dim=1) - # soft_prediction_powerset = soft_prediction_powerset.reshape(batch_size, num_frames, even_prediction_powerset.size(2)) - - # decide what pass to use in final loss - end_even_loss = 0 - seg_loss = no_guide_loss + end_even_loss + guided_loss - - self.model.log( - "loss/train/segmentation", - seg_loss, - on_step=False, - on_epoch=True, - prog_bar=False, - logger=True, - ) - - loss = seg_loss - - self.model.log( - "loss/train", - loss, - on_step=False, - on_epoch=True, - prog_bar=False, - logger=True, - ) - - return {"loss": loss} - - def default_metric( - self, - ) -> Union[Metric, Sequence[Metric], Dict[str, Metric]]: - """Returns diarization error rate and its components""" - - if self.specifications.powerset: - return { - "DiarizationErrorRate": DiarizationErrorRate(0.5), - - "DiarizationErrorRate/Confusion": SpeakerConfusionRate(0.5), - "DiarizationErrorRate/Miss": MissedDetectionRate(0.5), - "DiarizationErrorRate/FalseAlarm": FalseAlarmRate(0.5), - } - - return { - "DiarizationErrorRate": OptimalDiarizationErrorRate(), - "DiarizationErrorRate/Threshold": OptimalDiarizationErrorRateThreshold(), - "DiarizationErrorRate/Confusion": OptimalSpeakerConfusionRate(), - "DiarizationErrorRate/Miss": OptimalMissedDetectionRate(), - "DiarizationErrorRate/FalseAlarm": OptimalFalseAlarmRate(), - } - - - def val__getitem__(self, idx): - validation_chunk = self.validation_chunks[idx] - return self.prepare_chunk( - validation_chunk["file_id"], - validation_chunk["start"], - duration=validation_chunk["duration"], - number=idx - ) - - # TODO: no need to compute gradient in this method - def validation_step(self, batch, batch_idx: int): - """Compute validation loss and metric - Parameters - ---------- - batch : dict of torch.Tensor - Current batch. - batch_idx: int - Batch index. 
- """ - print(batch["number"]) - # target - target = batch["y"] - # (batch_size, num_frames, num_speakers) - - waveform = batch["X"] - # (batch_size, num_channels, num_samples) - - # TODO: should we handle validation samples with too many speakers - # waveform = waveform[keep] - # target = target[keep] - target_powerset = self.model.powerset.to_powerset(target.float()) - batch_size = target_powerset.size(0) - num_frames = target_powerset.size(1) - num_speakers = target_powerset.size(2) - - - #create the guide with the same size as the targets - guide = torch.log(torch.full(target_powerset.size(), fill_value=1/num_speakers, device=target.device)) - guide_length = self.duration - self.step - guide_length = int(np.floor(num_frames * guide_length / self.duration)) # round down - - - predictions_powerset = torch.zeros(target_powerset.size(), device=target.device) - predictions_powerset[0] = self.model(waveform[0:1]) - for i in range(1, batch_size): - guide[i, :guide_length] = predictions_powerset[i-1, num_frames-guide_length:] - predictions_powerset[i] = self.model(waveform[i:i+1], guide[i:i+1]) - - multilabel = self.model.powerset.to_multilabel(predictions_powerset) - permutated_target, _ = permutate(multilabel, target) - permutated_target_powerset = self.model.powerset.to_powerset(permutated_target.float()) - - seg_loss = self.segmentation_loss(predictions_powerset[1:], permutated_target_powerset[1:]) - - self.model.log( - "loss/val/segmentation", - seg_loss, - on_step=False, - on_epoch=True, - prog_bar=False, - logger=True, - ) - - self.model.validation_metric( - torch.transpose(multilabel, 1, 2), - torch.transpose(target, 1, 2), - ) - - self.model.log_dict( - self.model.validation_metric, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - # log first batch visualization every 2^n epochs. 
- if ( - self.model.current_epoch == 0 - or math.log2(self.model.current_epoch) % 1 > 0 - or batch_idx > 0 - ): - return - - # visualize first 9 validation samples of first batch in Tensorboard/MLflow - - y = permutated_target.float().cpu().numpy() - y_pred = multilabel.cpu().numpy() - - # prepare 3 x 3 grid (or smaller if batch size is smaller) - num_samples = min(self.batch_size, 9) - nrows = math.ceil(math.sqrt(num_samples)) - ncols = math.ceil(num_samples / nrows) - fig, axes = plt.subplots( - nrows=2 * nrows, ncols=ncols, figsize=(8, 5), squeeze=False - ) - - # reshape target so that there is one line per class when plotting it - y[y == 0] = np.NaN - if len(y.shape) == 2: - y = y[:, :, np.newaxis] - y *= np.arange(y.shape[2]) - - # plot each sample - for sample_idx in range(num_samples): - # find where in the grid it should be plotted - row_idx = sample_idx // nrows - col_idx = sample_idx % ncols - - # plot target - ax_ref = axes[row_idx * 2 + 0, col_idx] - sample_y = y[sample_idx] - ax_ref.plot(sample_y) - ax_ref.set_xlim(0, len(sample_y)) - ax_ref.set_ylim(-1, sample_y.shape[1]) - ax_ref.get_xaxis().set_visible(False) - ax_ref.get_yaxis().set_visible(False) - - # plot predictions - ax_hyp = axes[row_idx * 2 + 1, col_idx] - sample_y_pred = y_pred[sample_idx] - ax_hyp.plot(sample_y_pred) - ax_hyp.set_ylim(-0.1, 1.1) - ax_hyp.set_xlim(0, len(sample_y)) - ax_hyp.get_xaxis().set_visible(False) - - plt.tight_layout() - - for logger in self.model.loggers: - if isinstance(logger, TensorBoardLogger): - logger.experiment.add_figure("samples", fig, self.model.current_epoch) - elif isinstance(logger, MLFlowLogger): - logger.experiment.log_figure( - run_id=logger.run_id, - figure=fig, - artifact_file=f"samples_epoch{self.model.current_epoch}.png", - ) - - plt.close(fig) \ No newline at end of file diff --git a/pyannote/audio/tasks/segmentation/streaming_speaker_diarization.py b/pyannote/audio/tasks/segmentation/streaming_speaker_diarization.py deleted file mode 100644 index 68769bf5d..000000000 --- a/pyannote/audio/tasks/segmentation/streaming_speaker_diarization.py +++ /dev/null @@ -1,898 +0,0 @@ -# MIT License -# -# Copyright (c) 2020- CNRS -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
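# training_step() of the guided task above runs three passes: an unguided pass on the
# whole batch, a pass on even-indexed samples whose guide prefix comes from the ground
# truth, and a pass on odd-indexed samples guided by their even partner's unguided
# output. A compact sketch of how those guides are assembled (placeholder tensors;
# whether the target prefix enters the guide as raw one-hots or as log-probabilities
# is an assumption of this sketch):
import torch

batch_size, num_frames, num_classes = 8, 100, 7
guide_frames = 80   # prefix of each chunk covered by the guide

target = torch.randint(0, 2, (batch_size, num_frames, num_classes)).float()
unguided = torch.log_softmax(torch.randn(batch_size, num_frames, num_classes), dim=-1)

# start from an uninformative log-uniform guide everywhere
guide = torch.full((batch_size, num_frames, num_classes), 1.0 / num_classes).log()

# even samples: guide prefix taken from the (clamped, log) ground truth
guide[0::2, :guide_frames] = target[0::2, :guide_frames].clamp_min(1e-6).log()

# odd samples: guide prefix taken from the tail of the even samples' unguided output
guide[1::2, :guide_frames] = unguided[0::2, num_frames - guide_frames:]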
-import sys -import math -import warnings -from collections import Counter -from typing import Dict, Literal, Sequence, Text, Tuple, Union - -import numpy as np -import torch -import torch.nn.functional -from matplotlib import pyplot as plt -from pyannote.core import Segment, SlidingWindowFeature -from pyannote.database.protocol import SpeakerDiarizationProtocol -from pyannote.database.protocol.protocol import Scope, Subset -from pytorch_lightning.loggers import MLFlowLogger, TensorBoardLogger -from rich.progress import track -from torch_audiomentations.core.transforms_interface import BaseWaveformTransform -from torchmetrics import Metric - -from pyannote.audio.core.task import Problem, Resolution, Specifications, Task -from pyannote.audio.tasks.segmentation.mixins import SegmentationTaskMixin -from pyannote.audio.torchmetrics import ( - DiarizationErrorRate, - FalseAlarmRate, - MissedDetectionRate, - OptimalDiarizationErrorRate, - OptimalDiarizationErrorRateThreshold, - OptimalFalseAlarmRate, - OptimalMissedDetectionRate, - OptimalSpeakerConfusionRate, - SpeakerConfusionRate, -) -from pyannote.audio.utils.loss import binary_cross_entropy, mse_loss, nll_loss -from pyannote.audio.utils.permutation import permutate -from pyannote.audio.utils.powerset import Powerset - -Subsets = list(Subset.__args__) -Scopes = list(Scope.__args__) - - -class StreamingSpeakerDiarization(SegmentationTaskMixin, Task): - """Speaker diarization - - Parameters - ---------- - protocol : SpeakerDiarizationProtocol - pyannote.database protocol - duration : float, optional - Chunks duration. Defaults to 2s. - max_speakers_per_chunk : int, optional - Maximum number of speakers per chunk (must be at least 2). - Defaults to estimating it from the training set. - max_speakers_per_frame : int, optional - Maximum number of (overlapping) speakers per frame. - Setting this value to 1 or more enables `powerset multi-class` training. - Default behavior is to use `multi-label` training. - weigh_by_cardinality: bool, optional - Weigh each powerset classes by the size of the corresponding speaker set. - In other words, {0, 1} powerset class weight is 2x bigger than that of {0} - or {1} powerset classes. Note that empty (non-speech) powerset class is - assigned the same weight as mono-speaker classes. Defaults to False (i.e. use - same weight for every class). Has no effect with `multi-label` training. - warm_up : float or (float, float), optional - Use that many seconds on the left- and rightmost parts of each chunk - to warm up the model. While the model does process those left- and right-most - parts, only the remaining central part of each chunk is used for computing the - loss during training, and for aggregating scores during inference. - Defaults to 0. (i.e. no warm-up). - balance: Sequence[Text], optional - When provided, training samples are sampled uniformly with respect to these keys. - For instance, setting `balance` to ["database","subset"] will make sure that each - database & subset combination will be equally represented in the training samples. - weight: str, optional - When provided, use this key as frame-wise weight in loss function. - batch_size : int, optional - Number of training samples per batch. Defaults to 32. - num_workers : int, optional - Number of workers used for generating training samples. - Defaults to multiprocessing.cpu_count() // 2. - pin_memory : bool, optional - If True, data loaders will copy tensors into CUDA pinned - memory before returning them. 
See pytorch documentation - for more details. Defaults to False. - augmentation : BaseWaveformTransform, optional - torch_audiomentations waveform transform, used by dataloader - during training. - vad_loss : {"bce", "mse"}, optional - Add voice activity detection loss. - Cannot be used in conjunction with `max_speakers_per_frame`. - metric : optional - Validation metric(s). Can be anything supported by torchmetrics.MetricCollection. - Defaults to AUROC (area under the ROC curve). - - References - ---------- - Hervé Bredin and Antoine Laurent - "End-To-End Speaker Segmentation for Overlap-Aware Resegmentation." - Proc. Interspeech 2021 - - Zhihao Du, Shiliang Zhang, Siqi Zheng, and Zhijie Yan - "Speaker Embedding-aware Neural Diarization: an Efficient Framework for Overlapping - Speech Diarization in Meeting Scenarios" - https://arxiv.org/abs/2203.09767 - - """ - - def __init__( - self, - protocol: SpeakerDiarizationProtocol, - duration: float = 2.0, - max_speakers_per_chunk: int = None, - max_speakers_per_frame: int = None, - weigh_by_cardinality: bool = False, - warm_up: Union[float, Tuple[float, float]] = 0.0, - balance: Sequence[Text] = None, - weight: Text = None, - batch_size: int = 32, - num_workers: int = None, - pin_memory: bool = False, - augmentation: BaseWaveformTransform = None, - vad_loss: Literal["bce", "mse"] = None, - metric: Union[Metric, Sequence[Metric], Dict[str, Metric]] = None, - max_num_speakers: int = None, # deprecated in favor of `max_speakers_per_chunk`` - loss: Literal["bce", "mse"] = None, # deprecated - latency: float = 0.0, - - ): - super().__init__( - protocol, - duration=duration, - warm_up=warm_up, - batch_size=batch_size, - num_workers=num_workers, - pin_memory=pin_memory, - augmentation=augmentation, - metric=metric, - ) - - if not isinstance(protocol, SpeakerDiarizationProtocol): - raise ValueError( - "SpeakerDiarization task requires a SpeakerDiarizationProtocol." - ) - - # deprecation warnings - if max_speakers_per_chunk is None and max_num_speakers is not None: - max_speakers_per_chunk = max_num_speakers - warnings.warn( - "`max_num_speakers` has been deprecated in favor of `max_speakers_per_chunk`." - ) - if loss is not None: - warnings.warn("`loss` has been deprecated and has no effect.") - - # parameter validation - if max_speakers_per_frame is not None: - if max_speakers_per_frame < 1: - raise ValueError( - f"`max_speakers_per_frame` must be 1 or more (you used {max_speakers_per_frame})." 
- ) - if vad_loss is not None: - raise ValueError( - "`vad_loss` cannot be used jointly with `max_speakers_per_frame`" - ) - - self.max_speakers_per_chunk = max_speakers_per_chunk - self.max_speakers_per_frame = max_speakers_per_frame - self.weigh_by_cardinality = weigh_by_cardinality - self.balance = balance - self.weight = weight - self.vad_loss = vad_loss - self.latency=latency - - - def setup(self): - super().setup() - - # estimate maximum number of speakers per chunk when not provided - if self.max_speakers_per_chunk is None: - training = self.metadata["subset"] == Subsets.index("train") - - num_unique_speakers = [] - progress_description = f"Estimating maximum number of speakers per {self.duration:g}s chunk in the training set" - for file_id in track( - np.where(training)[0], description=progress_description - ): - annotations = self.annotations[ - np.where(self.annotations["file_id"] == file_id)[0] - ] - annotated_regions = self.annotated_regions[ - np.where(self.annotated_regions["file_id"] == file_id)[0] - ] - for region in annotated_regions: - # find annotations within current region - region_start = region["start"] - region_end = region["end"] - region_annotations = annotations[ - np.where( - (annotations["start"] >= region_start) - * (annotations["end"] <= region_end) - )[0] - ] - - for window_start in np.arange( - region_start, region_end - self.duration, 0.25 * self.duration - ): - window_end = window_start + self.duration - window_annotations = region_annotations[ - np.where( - (region_annotations["start"] <= window_end) - * (region_annotations["end"] >= window_start) - )[0] - ] - num_unique_speakers.append( - len(np.unique(window_annotations["file_label_idx"])) - ) - - # because there might a few outliers, estimate the upper bound for the - # number of speakers as the 97th percentile - - num_speakers, counts = zip(*list(Counter(num_unique_speakers).items())) - num_speakers, counts = np.array(num_speakers), np.array(counts) - - sorting_indices = np.argsort(num_speakers) - num_speakers = num_speakers[sorting_indices] - counts = counts[sorting_indices] - - ratios = np.cumsum(counts) / np.sum(counts) - - for k, ratio in zip(num_speakers, ratios): - if k == 0: - print(f" - {ratio:7.2%} of all chunks contain no speech at all.") - elif k == 1: - print(f" - {ratio:7.2%} contain 1 speaker or less") - else: - print(f" - {ratio:7.2%} contain {k} speakers or less") - - self.max_speakers_per_chunk = max( - 2, - num_speakers[np.where(ratios > 0.97)[0][0]], - ) - - print( - f"Setting `max_speakers_per_chunk` to {self.max_speakers_per_chunk}. " - f"You can override this value (or avoid this estimation step) by passing `max_speakers_per_chunk={self.max_speakers_per_chunk}` to the task constructor." 
- ) - - if ( - self.max_speakers_per_frame is not None - and self.max_speakers_per_frame > self.max_speakers_per_chunk - ): - raise ValueError( - f"`max_speakers_per_frame` ({self.max_speakers_per_frame}) must be smaller " - f"than `max_speakers_per_chunk` ({self.max_speakers_per_chunk})" - ) - - # now that we know about the number of speakers upper bound - # we can set task specifications - self.specifications = Specifications( - problem=Problem.MULTI_LABEL_CLASSIFICATION - if self.max_speakers_per_frame is None - else Problem.MONO_LABEL_CLASSIFICATION, - resolution=Resolution.FRAME, - duration=self.duration, - min_duration=self.min_duration, - warm_up=self.warm_up, - classes=[f"speaker#{i+1}" for i in range(self.max_speakers_per_chunk)], - powerset_max_classes=self.max_speakers_per_frame, - permutation_invariant=True, - ) - - def setup_loss_func(self): - if self.specifications.powerset: - self.model.powerset = Powerset( - len(self.specifications.classes), - self.specifications.powerset_max_classes, - ) - - def prepare_chunk(self, file_id: int, start_time: float, duration: float): - """Prepare chunk - - Parameters - ---------- - file_id : int - File index - start_time : float - Chunk start time - duration : float - Chunk duration. - - Returns - ------- - sample : dict - Dictionary containing the chunk data with the following keys: - - `X`: waveform - - `y`: target as a SlidingWindowFeature instance where y.labels is - in meta.scope space. - - `meta`: - - `scope`: target scope (0: file, 1: database, 2: global) - - `database`: database index - - `file`: file index - """ - - file = self.get_file(file_id) - - # get label scope - label_scope = Scopes[self.metadata[file_id]["scope"]] - label_scope_key = f"{label_scope}_label_idx" - - # - chunk = Segment(start_time, start_time + duration) - - sample = dict() - sample["X"], _ = self.model.audio.crop(file, chunk, duration=duration) - - # gather all annotations of current file - annotations = self.annotations[self.annotations["file_id"] == file_id] - - # gather all annotations with non-empty intersection with current chunk - chunk_annotations = annotations[ - (annotations["start"] < chunk.end) & (annotations["end"] > chunk.start) - ] - - # discretize chunk annotations at model output resolution - start = np.maximum(chunk_annotations["start"], chunk.start) - chunk.start - start_idx = np.floor(start / self.model.example_output.frames.step).astype(int) - end = np.minimum(chunk_annotations["end"], chunk.end) - chunk.start - end_idx = np.ceil(end / self.model.example_output.frames.step).astype(int) - - # get list and number of labels for current scope - labels = list(np.unique(chunk_annotations[label_scope_key])) - num_labels = len(labels) - - if num_labels > self.max_speakers_per_chunk: - pass - - # initial frame-level targets - y = np.zeros((self.model.example_output.num_frames, num_labels), dtype=np.uint8) - - # map labels to indices - mapping = {label: idx for idx, label in enumerate(labels)} - - for start, end, label in zip( - start_idx, end_idx, chunk_annotations[label_scope_key] - ): - mapped_label = mapping[label] - y[start:end, mapped_label] = 1 - - sample["y"] = SlidingWindowFeature( - y, self.model.example_output.frames, labels=labels - ) - - metadata = self.metadata[file_id] - sample["meta"] = {key: metadata[key] for key in metadata.dtype.names} - sample["meta"]["file"] = file_id - - return sample - - def collate_y(self, batch) -> torch.Tensor: - """ - - Parameters - ---------- - batch : list - List of samples to collate. 
- "y" field is expected to be a SlidingWindowFeature. - - Returns - ------- - y : torch.Tensor - Collated target tensor of shape (num_frames, self.max_speakers_per_chunk) - If one chunk has more than `self.max_speakers_per_chunk` speakers, we keep - the max_speakers_per_chunk most talkative ones. If it has less, we pad with - zeros (artificial inactive speakers). - """ - - collated_y = [] - for b in batch: - y = b["y"].data - num_speakers = len(b["y"].labels) - if num_speakers > self.max_speakers_per_chunk: - # sort speakers in descending talkativeness order - indices = np.argsort(-np.sum(y, axis=0), axis=0) - # keep only the most talkative speakers - y = y[:, indices[: self.max_speakers_per_chunk]] - - # TODO: we should also sort the speaker labels in the same way - - elif num_speakers < self.max_speakers_per_chunk: - # create inactive speakers by zero padding - y = np.pad( - y, - ((0, 0), (0, self.max_speakers_per_chunk - num_speakers)), - mode="constant", - ) - - else: - # we have exactly the right number of speakers - pass - - collated_y.append(y) - - return torch.from_numpy(np.stack(collated_y)) - - def segmentation_loss( - self, - permutated_prediction: torch.Tensor, - target: torch.Tensor, - weight: torch.Tensor = None, - ) -> torch.Tensor: - """Permutation-invariant segmentation loss - - Parameters - ---------- - permutated_prediction : (batch_size, num_frames, num_classes) torch.Tensor - Permutated speaker activity predictions. - target : (batch_size, num_frames, num_speakers) torch.Tensor - Speaker activity. - weight : (batch_size, num_frames, 1) torch.Tensor, optional - Frames weight. - - Returns - ------- - seg_loss : torch.Tensor - Permutation-invariant segmentation loss - """ - if self.specifications.powerset: - # `clamp_min` is needed to set non-speech weight to 1. - class_weight = ( - torch.clamp_min(self.model.powerset.cardinality, 1.0) - if self.weigh_by_cardinality - else None - ) - - seg_loss = nll_loss( - permutated_prediction, - torch.argmax(target, dim=-1), - class_weight=class_weight, - weight=weight, - ) - else: - seg_loss = binary_cross_entropy( - permutated_prediction, target.float(), weight=weight - ) - - return seg_loss - - def voice_activity_detection_loss( - self, - permutated_prediction: torch.Tensor, - target: torch.Tensor, - weight: torch.Tensor = None, - ) -> torch.Tensor: - """Voice activity detection loss - - Parameters - ---------- - permutated_prediction : (batch_size, num_frames, num_classes) torch.Tensor - Speaker activity predictions. - target : (batch_size, num_frames, num_speakers) torch.Tensor - Speaker activity. - weight : (batch_size, num_frames, 1) torch.Tensor, optional - Frames weight. - - Returns - ------- - vad_loss : torch.Tensor - Voice activity detection loss. - """ - - vad_prediction, _ = torch.max(permutated_prediction, dim=2, keepdim=True) - # (batch_size, num_frames, 1) - - vad_target, _ = torch.max(target.float(), dim=2, keepdim=False) - # (batch_size, num_frames) - - if self.vad_loss == "bce": - loss = binary_cross_entropy(vad_prediction, vad_target, weight=weight) - - elif self.vad_loss == "mse": - loss = mse_loss(vad_prediction, vad_target, weight=weight) - - return loss - - def training_step(self, batch, batch_idx: int): - """Compute permutation-invariant segmentation loss - - Parameters - ---------- - batch : (usually) dict of torch.Tensor - Current batch. - batch_idx: int - Batch index. 
- - Returns - ------- - loss : {str: torch.tensor} - {"loss": loss} - """ - - # target - target = batch["y"] - # (batch_size, num_frames, num_speakers) - - waveform = batch["X"] - # (batch_size, num_channels, num_samples) - - # drop samples that contain too many speakers - num_speakers: torch.Tensor = torch.sum(torch.any(target, dim=1), dim=1) - keep: torch.Tensor = num_speakers <= self.max_speakers_per_chunk - target = target[keep] - waveform = waveform[keep] - - # corner case - if not keep.any(): - return None - - # forward pass - prediction = self.model(waveform) - batch_size, num_frames, _ = prediction.shape - # (batch_size, num_frames, num_classes) - - # frames weight - weight_key = getattr(self, "weight", None) - weight = batch.get( - weight_key, - torch.ones(batch_size, num_frames, 1, device=self.model.device), - ) - # (batch_size, num_frames, 1) - - # warm-up - warm_up_left = round(self.warm_up[0] / self.duration * num_frames) - weight[:, :warm_up_left] = 0.0 - warm_up_right = round(self.warm_up[1] / self.duration * num_frames) - weight[:, num_frames - warm_up_right :] = 0.0 - - delay = int(np.floor(num_frames * self.latency / self.duration)) # round down - - prediction = prediction[:, delay:, :] - target = target[:, :num_frames-delay, :] - - if self.specifications.powerset: - multilabel = self.model.powerset.to_multilabel(prediction) - permutated_target, _ = permutate(multilabel, target) - permutated_target_powerset = self.model.powerset.to_powerset( - permutated_target.float() - ) - seg_loss = self.segmentation_loss( - prediction, permutated_target_powerset, weight=weight - ) - - else: - permutated_prediction, _ = permutate(target, prediction) - seg_loss = self.segmentation_loss( - permutated_prediction, target, weight=weight - ) - - self.model.log( - "loss/train/segmentation", - seg_loss, - on_step=False, - on_epoch=True, - prog_bar=False, - logger=True, - ) - - if self.vad_loss is None: - vad_loss = 0.0 - - else: - # TODO: vad_loss probably does not make sense in powerset mode - # because first class (empty set of labels) does exactly this... 
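# The delay trick above implements latency-aware training: the model is asked to
# predict, at frame t, the speakers active at frame t - delay, so predictions are
# cropped at the start and targets at the end before computing the loss. A toy
# illustration of that alignment; sizes are arbitrary.
import numpy as np
import torch

duration, latency = 5.0, 0.5          # seconds
num_frames = 293                      # frames per chunk at model resolution
delay = int(np.floor(num_frames * latency / duration))   # frames of look-back

prediction = torch.randn(4, num_frames, 7)               # (batch, frames, classes)
target = torch.randint(0, 2, (4, num_frames, 3)).float() # (batch, frames, speakers)

aligned_prediction = prediction[:, delay:, :]            # drop the first `delay` frames
aligned_target = target[:, :num_frames - delay, :]       # drop the last `delay` frames
assert aligned_prediction.shape[1] == aligned_target.shape[1]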
- if self.specifications.powerset: - vad_loss = self.voice_activity_detection_loss( - prediction, permutated_target_powerset, weight=weight - ) - - else: - vad_loss = self.voice_activity_detection_loss( - permutated_prediction, target, weight=weight - ) - - self.model.log( - "loss/train/vad", - vad_loss, - on_step=False, - on_epoch=True, - prog_bar=False, - logger=True, - ) - - loss = seg_loss + vad_loss - - # skip batch if something went wrong for some reason - if torch.isnan(loss): - return None - - self.model.log( - "loss/train", - loss, - on_step=False, - on_epoch=True, - prog_bar=False, - logger=True, - ) - - return {"loss": loss} - - def default_metric( - self, - ) -> Union[Metric, Sequence[Metric], Dict[str, Metric]]: - """Returns diarization error rate and its components""" - - if self.specifications.powerset: - return { - "DiarizationErrorRate": DiarizationErrorRate(0.5), - "DiarizationErrorRate/Confusion": SpeakerConfusionRate(0.5), - "DiarizationErrorRate/Miss": MissedDetectionRate(0.5), - "DiarizationErrorRate/FalseAlarm": FalseAlarmRate(0.5), - } - - return { - "DiarizationErrorRate": OptimalDiarizationErrorRate(), - "DiarizationErrorRate/Threshold": OptimalDiarizationErrorRateThreshold(), - "DiarizationErrorRate/Confusion": OptimalSpeakerConfusionRate(), - "DiarizationErrorRate/Miss": OptimalMissedDetectionRate(), - "DiarizationErrorRate/FalseAlarm": OptimalFalseAlarmRate(), - } - - # TODO: no need to compute gradient in this method - def validation_step(self, batch, batch_idx: int): - """Compute validation loss and metric - - Parameters - ---------- - batch : dict of torch.Tensor - Current batch. - batch_idx: int - Batch index. - """ - - # target - target = batch["y"] - # (batch_size, num_frames, num_speakers) - - waveform = batch["X"] - # (batch_size, num_channels, num_samples) - - # TODO: should we handle validation samples with too many speakers - # waveform = waveform[keep] - # target = target[keep] - - # forward pass - prediction = self.model(waveform) - batch_size, num_frames, _ = prediction.shape - - # frames weight - weight_key = getattr(self, "weight", None) - weight = batch.get( - weight_key, - torch.ones(batch_size, num_frames, 1, device=self.model.device), - ) - # (batch_size, num_frames, 1) - - # warm-up - warm_up_left = round(self.warm_up[0] / self.duration * num_frames) - weight[:, :warm_up_left] = 0.0 - warm_up_right = round(self.warm_up[1] / self.duration * num_frames) - weight[:, num_frames - warm_up_right :] = 0.0 - - delay = int(np.floor(num_frames * self.latency / self.duration)) # round down - - prediction = prediction[:, delay:, :] - target = target[:, :num_frames-delay, :] - - - if self.specifications.powerset: - multilabel = self.model.powerset.to_multilabel(prediction) - permutated_target, _ = permutate(multilabel, target) - - # FIXME: handle case where target have too many speakers? 
- # since we don't need - permutated_target_powerset = self.model.powerset.to_powerset( - permutated_target.float() - ) - seg_loss = self.segmentation_loss( - prediction, permutated_target_powerset, weight=weight - ) - - else: - permutated_prediction, _ = permutate(target, prediction) - seg_loss = self.segmentation_loss( - permutated_prediction, target, weight=weight - ) - - self.model.log( - "loss/val/segmentation", - seg_loss, - on_step=False, - on_epoch=True, - prog_bar=False, - logger=True, - ) - - if self.vad_loss is None: - vad_loss = 0.0 - - else: - # TODO: vad_loss probably does not make sense in powerset mode - # because first class (empty set of labels) does exactly this... - if self.specifications.powerset: - vad_loss = self.voice_activity_detection_loss( - prediction, permutated_target_powerset, weight=weight - ) - - else: - vad_loss = self.voice_activity_detection_loss( - permutated_prediction, target, weight=weight - ) - - self.model.log( - "loss/val/vad", - vad_loss, - on_step=False, - on_epoch=True, - prog_bar=False, - logger=True, - ) - - loss = seg_loss + vad_loss - - self.model.log( - "loss/val", - loss, - on_step=False, - on_epoch=True, - prog_bar=False, - logger=True, - ) - - if self.specifications.powerset: - self.model.validation_metric( - torch.transpose( - multilabel[:, warm_up_left : num_frames - warm_up_right], 1, 2 - ), - torch.transpose( - target[:, warm_up_left : num_frames - warm_up_right], 1, 2 - ), - ) - else: - self.model.validation_metric( - torch.transpose( - prediction[:, warm_up_left : num_frames - warm_up_right], 1, 2 - ), - torch.transpose( - target[:, warm_up_left : num_frames - warm_up_right], 1, 2 - ), - ) - - self.model.log_dict( - self.model.validation_metric, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - # log first batch visualization every 2^n epochs. 
- if ( - self.model.current_epoch == 0 - or math.log2(self.model.current_epoch) % 1 > 0 - or batch_idx > 0 - ): - return - - # visualize first 9 validation samples of first batch in Tensorboard/MLflow - - if self.specifications.powerset: - y = permutated_target.float().cpu().numpy() - y_pred = multilabel.cpu().numpy() - else: - y = target.float().cpu().numpy() - y_pred = permutated_prediction.cpu().numpy() - - # prepare 3 x 3 grid (or smaller if batch size is smaller) - num_samples = min(self.batch_size, 9) - nrows = math.ceil(math.sqrt(num_samples)) - ncols = math.ceil(num_samples / nrows) - fig, axes = plt.subplots( - nrows=2 * nrows, ncols=ncols, figsize=(8, 5), squeeze=False - ) - - # reshape target so that there is one line per class when plotting it - y[y == 0] = np.NaN - if len(y.shape) == 2: - y = y[:, :, np.newaxis] - y *= np.arange(y.shape[2]) - - # plot each sample - for sample_idx in range(num_samples): - # find where in the grid it should be plotted - row_idx = sample_idx // nrows - col_idx = sample_idx % ncols - - # plot target - ax_ref = axes[row_idx * 2 + 0, col_idx] - sample_y = y[sample_idx] - ax_ref.plot(sample_y) - ax_ref.set_xlim(0, len(sample_y)) - ax_ref.set_ylim(-1, sample_y.shape[1]) - ax_ref.get_xaxis().set_visible(False) - ax_ref.get_yaxis().set_visible(False) - - # plot predictions - ax_hyp = axes[row_idx * 2 + 1, col_idx] - sample_y_pred = y_pred[sample_idx] - ax_hyp.axvspan(0, warm_up_left, color="k", alpha=0.5, lw=0) - ax_hyp.axvspan( - num_frames - warm_up_right, num_frames, color="k", alpha=0.5, lw=0 - ) - ax_hyp.plot(sample_y_pred) - ax_hyp.set_ylim(-0.1, 1.1) - ax_hyp.set_xlim(0, len(sample_y)) - ax_hyp.get_xaxis().set_visible(False) - - plt.tight_layout() - - for logger in self.model.loggers: - if isinstance(logger, TensorBoardLogger): - logger.experiment.add_figure("samples", fig, self.model.current_epoch) - elif isinstance(logger, MLFlowLogger): - logger.experiment.log_figure( - run_id=logger.run_id, - figure=fig, - artifact_file=f"samples_epoch{self.model.current_epoch}.png", - ) - - plt.close(fig) - - -def main(protocol: str, subset: str = "test", model: str = "pyannote/segmentation"): - """Evaluate a segmentation model""" - - from pyannote.database import FileFinder, get_protocol - from rich.progress import Progress - - from pyannote.audio import Inference - from pyannote.audio.pipelines.utils import get_devices - from pyannote.audio.utils.metric import DiscreteDiarizationErrorRate - from pyannote.audio.utils.signal import binarize - - (device,) = get_devices(needs=1) - metric = DiscreteDiarizationErrorRate() - protocol = get_protocol(protocol, preprocessors={"audio": FileFinder()}) - files = list(getattr(protocol, subset)()) - - with Progress() as progress: - main_task = progress.add_task(protocol.name, total=len(files)) - file_task = progress.add_task("Processing", total=1.0) - - def progress_hook(completed: int = None, total: int = None): - progress.update(file_task, completed=completed / total) - - inference = Inference(model, device=device) - - for file in files: - progress.update(file_task, description=file["uri"]) - reference = file["annotation"] - hypothesis = binarize(inference(file, hook=progress_hook)) - uem = file["annotated"] - _ = metric(reference, hypothesis, uem=uem) - progress.advance(main_task) - - _ = metric.report(display=True) - - -if __name__ == "__main__": - import typer - - typer.run(main) From 84d8887318f541fdeeb78d45ee41f8771485ba05 Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Mon, 25 Mar 2024 17:27:56 +0100 Subject: 
[PATCH 20/23] SegmentationTaskMixin does not exist anymore, replace the inheritance in MultilatencySpeakerDiarization --- .../tasks/segmentation/multilatency_speaker_diarization.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py b/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py index 25c004c34..9dc5cadfb 100644 --- a/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py +++ b/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py @@ -38,7 +38,7 @@ from torchmetrics import Metric from pyannote.audio.core.task import Problem, Resolution, Specifications, Task -from pyannote.audio.tasks.segmentation.mixins import SegmentationTaskMixin +from pyannote.audio.tasks.segmentation.mixins import SegmentationTask from pyannote.audio.torchmetrics import ( DiarizationErrorRate, FalseAlarmRate, @@ -58,7 +58,7 @@ Scopes = list(Scope.__args__) -class MultilatencySpeakerDiarization(SegmentationTaskMixin, Task): +class MultilatencySpeakerDiarization(SegmentationTask, Task): """Speaker diarization Parameters ---------- From 693e70fecf8eeccd66882052cc6caf6d201c97a4 Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Wed, 27 Mar 2024 15:58:40 +0100 Subject: [PATCH 21/23] adapting the new model and task to the newest pyannote implementation --- .../segmentation/MultilatencyPyanNet.py | 80 ++++++++- .../multilatency_speaker_diarization.py | 153 +++++++++++++----- 2 files changed, 186 insertions(+), 47 deletions(-) diff --git a/pyannote/audio/models/segmentation/MultilatencyPyanNet.py b/pyannote/audio/models/segmentation/MultilatencyPyanNet.py index cf716d5d1..c436ffa1c 100644 --- a/pyannote/audio/models/segmentation/MultilatencyPyanNet.py +++ b/pyannote/audio/models/segmentation/MultilatencyPyanNet.py @@ -20,6 +20,19 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +from functools import lru_cache +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange +from pyannote.core.utils.generators import pairwise + +from pyannote.audio.core.model import Model +from pyannote.audio.core.task import Task +from pyannote.audio.models.blocks.sincnet import SincNet +from pyannote.audio.utils.params import merge_dict from typing import Optional from dataclasses import dataclass @@ -161,6 +174,66 @@ def __init__( * self.hparams.linear["num_layers"] ) ]) + + @property + def dimension(self) -> int: + """Dimension of output""" + if isinstance(self.specifications, tuple): + raise ValueError("PyanNet does not support multi-tasking.") + + if self.specifications.powerset: + return self.specifications.num_powerset_classes + else: + return len(self.specifications.classes) + + @lru_cache + def num_frames(self, num_samples: int) -> int: + """Compute number of output frames for a given number of input samples + + Parameters + ---------- + num_samples : int + Number of input samples + + Returns + ------- + num_frames : int + Number of output frames + """ + + return self.sincnet.num_frames(num_samples) + + def receptive_field_size(self, num_frames: int = 1) -> int: + """Compute size of receptive field + + Parameters + ---------- + num_frames : int, optional + Number of frames in the output signal + + Returns + ------- + receptive_field_size : int + Receptive field size.
+ """ + return self.sincnet.receptive_field_size(num_frames=num_frames) + + def receptive_field_center(self, frame: int = 0) -> int: + """Compute center of receptive field + + Parameters + ---------- + frame : int, optional + Frame index + + Returns + ------- + receptive_field_center : int + Index of receptive field center. + """ + + return self.sincnet.receptive_field_center(frame=frame) + def build(self): if self.hparams.linear["num_layers"] > 0: @@ -210,15 +283,10 @@ def forward(self, waveforms: torch.Tensor) -> torch.Tensor: # tensor of size (batch_size, num_frames, num_speakers * K) where K is the number of latencies predictions = self.activation(self.classifier(outputs)) num_classes_powerset = predictions.size(2) //len(self.latency_list) - # # tensor of size (batch_size, num_frames, num_speakers, K) - # predictions = predictions.view(predictions.size(0), predictions.size(1), predictions.size(2) // len(self.latency_list), len(self.latency_list)) - - # # tensor of size (k, batch_size, num_frames, num_speakers) - # predictions = predictions.permute(3, 0, 1, 2) if self.latency_index == -1: # return all latencies return predictions # return only the corresponding latency - return predictions[:,:,self.latency_index*num_classes_powerset:self.latency_index*num_classes_powerset+num_classes_powerset] \ No newline at end of file + return predictions[:,:, self.latency_index * num_classes_powerset : self.latency_index*num_classes_powerset + num_classes_powerset] \ No newline at end of file diff --git a/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py b/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py index 9dc5cadfb..aa35c505e 100644 --- a/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py +++ b/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py @@ -23,7 +23,8 @@ import math import warnings from collections import Counter -from typing import Dict, Literal, Sequence, Text, Tuple, Union, List +from typing import Dict, Literal, Sequence, Text, Tuple, Union, List, Optional +import torch.nn.functional as F import numpy as np import torch @@ -50,13 +51,63 @@ OptimalSpeakerConfusionRate, SpeakerConfusionRate, ) -from pyannote.audio.utils.loss import binary_cross_entropy, mse_loss, nll_loss +from pyannote.audio.utils.loss import binary_cross_entropy, mse_loss, interpolate +# from pyannote.audio.utils.loss import nll_loss + from pyannote.audio.utils.permutation import permutate from pyannote.audio.utils.powerset import Powerset Subsets = list(Subset.__args__) Scopes = list(Scope.__args__) +def nll_loss( + prediction: torch.Tensor, + target: torch.Tensor, + class_weight: Optional[torch.Tensor] = None, + weight: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """Frame-weighted negative log-likelihood loss + + Parameters + ---------- + prediction : torch.Tensor + Prediction with shape (batch_size, num_frames, num_classes). + target : torch.Tensor + Target with shape (batch_size, num_frames) + class_weight : (num_classes, ) torch.Tensor, optional + Class weight with shape (num_classes, ) + weight : (batch_size, num_frames, 1) torch.Tensor, optional + Frame weight with shape (batch_size, num_frames, 1). 
+ + Returns + ------- + loss : torch.Tensor + """ + + num_classes = prediction.shape[2] + + losses = F.nll_loss( + prediction.reshape(-1, num_classes), + # (batch_size x num_frames, num_classes) + target.view(-1), + # (batch_size x num_frames, ) + weight=class_weight, + # (num_classes, ) + reduction="none", + ).view(target.shape) + # (batch_size, num_frames) + + if weight is None: + return torch.mean(losses) + + else: + # interpolate weight + weight = interpolate(target, weight=weight).squeeze(dim=2) + # (batch_size, num_frames) + + return torch.sum(losses * weight) / torch.sum(weight) + + class MultilatencySpeakerDiarization(SegmentationTask, Task): """Speaker diarization @@ -123,22 +174,24 @@ class MultilatencySpeakerDiarization(SegmentationTask, Task): def __init__( self, protocol: SpeakerDiarizationProtocol, + cache: Optional[Union[str, None]] = None, duration: float = 2.0, - max_speakers_per_chunk: int = None, - max_speakers_per_frame: int = None, + max_speakers_per_chunk: Optional[int] = None, + max_speakers_per_frame: Optional[int] = None, weigh_by_cardinality: bool = False, warm_up: Union[float, Tuple[float, float]] = 0.0, - balance: Sequence[Text] = None, - weight: Text = None, + balance: Optional[Sequence[Text]] = None, + weight: Optional[Text] = None, batch_size: int = 32, - num_workers: int = None, + num_workers: Optional[int] = None, pin_memory: bool = False, - augmentation: BaseWaveformTransform = None, + augmentation: Optional[BaseWaveformTransform] = None, vad_loss: Literal["bce", "mse"] = None, metric: Union[Metric, Sequence[Metric], Dict[str, Metric]] = None, - max_num_speakers: int = None, # deprecated in favor of `max_speakers_per_chunk`` + max_num_speakers: Optional[ + int + ] = None, # deprecated in favor of `max_speakers_per_chunk`` loss: Literal["bce", "mse"] = None, # deprecated - latency: float = 0.0, latency_list: List[float] = [0.0], ): @@ -151,6 +204,7 @@ def __init__( pin_memory=pin_memory, augmentation=augmentation, metric=metric, + cache=cache, ) if not isinstance(protocol, SpeakerDiarizationProtocol): @@ -184,32 +238,37 @@ def __init__( self.balance = balance self.weight = weight self.vad_loss = vad_loss - self.latency=latency self.latency_list=latency_list - def setup(self): - super().setup() + def setup(self, stage=None): + super().setup(stage) # estimate maximum number of speakers per chunk when not provided if self.max_speakers_per_chunk is None: - training = self.metadata["subset"] == Subsets.index("train") + training = self.prepared_data["audio-metadata"]["subset"] == Subsets.index( + "train" + ) num_unique_speakers = [] progress_description = f"Estimating maximum number of speakers per {self.duration:g}s chunk in the training set" for file_id in track( np.where(training)[0], description=progress_description ): - annotations = self.annotations[ - np.where(self.annotations["file_id"] == file_id)[0] + annotations = self.prepared_data["annotations-segments"][ + np.where( + self.prepared_data["annotations-segments"]["file_id"] == file_id + )[0] ] - annotated_regions = self.annotated_regions[ - np.where(self.annotated_regions["file_id"] == file_id)[0] + annotated_regions = self.prepared_data["annotations-regions"][ + np.where( + self.prepared_data["annotations-regions"]["file_id"] == file_id + )[0] ] for region in annotated_regions: # find annotations within current region region_start = region["start"] - region_end = region["end"] + region_end = region["start"] + region["duration"] region_annotations = annotations[ np.where( (annotations["start"] >= 
region_start) @@ -294,6 +353,7 @@ def setup_loss_func(self): def prepare_chunk(self, file_id: int, start_time: float, duration: float): """Prepare chunk + Parameters ---------- file_id : int @@ -302,6 +362,7 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float): Chunk start time duration : float Chunk duration. + Returns ------- sample : dict @@ -318,7 +379,7 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float): file = self.get_file(file_id) # get label scope - label_scope = Scopes[self.metadata[file_id]["scope"]] + label_scope = Scopes[self.prepared_data["audio-metadata"][file_id]["scope"]] label_scope_key = f"{label_scope}_label_idx" # @@ -328,7 +389,9 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float): sample["X"], _ = self.model.audio.crop(file, chunk, duration=duration) # gather all annotations of current file - annotations = self.annotations[self.annotations["file_id"] == file_id] + annotations = self.prepared_data["annotations-segments"][ + self.prepared_data["annotations-segments"]["file_id"] == file_id + ] # gather all annotations with non-empty intersection with current chunk chunk_annotations = annotations[ @@ -336,10 +399,14 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float): ] # discretize chunk annotations at model output resolution - start = np.maximum(chunk_annotations["start"], chunk.start) - chunk.start - start_idx = np.floor(start / self.model.example_output.frames.step).astype(int) - end = np.minimum(chunk_annotations["end"], chunk.end) - chunk.start - end_idx = np.ceil(end / self.model.example_output.frames.step).astype(int) + step = self.model.receptive_field.step + half = 0.5 * self.model.receptive_field.duration + + start = np.maximum(chunk_annotations["start"], chunk.start) - chunk.start - half + start_idx = np.maximum(0, np.round(start / step)).astype(int) + + end = np.minimum(chunk_annotations["end"], chunk.end) - chunk.start - half + end_idx = np.round(end / step).astype(int) # get list and number of labels for current scope labels = list(np.unique(chunk_annotations[label_scope_key])) @@ -349,7 +416,10 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float): pass # initial frame-level targets - y = np.zeros((self.model.example_output.num_frames, num_labels), dtype=np.uint8) + num_frames = self.model.num_frames( + round(duration * self.model.hparams.sample_rate) + ) + y = np.zeros((num_frames, num_labels), dtype=np.uint8) # map labels to indices mapping = {label: idx for idx, label in enumerate(labels)} @@ -358,13 +428,11 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float): start_idx, end_idx, chunk_annotations[label_scope_key] ): mapped_label = mapping[label] - y[start:end, mapped_label] = 1 + y[start : end + 1, mapped_label] = 1 - sample["y"] = SlidingWindowFeature( - y, self.model.example_output.frames, labels=labels - ) + sample["y"] = SlidingWindowFeature(y, self.model.receptive_field, labels=labels) - metadata = self.metadata[file_id] + metadata = self.prepared_data["audio-metadata"][file_id] sample["meta"] = {key: metadata[key] for key in metadata.dtype.names} sample["meta"]["file"] = file_id @@ -372,11 +440,13 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float): def collate_y(self, batch) -> torch.Tensor: """ + Parameters ---------- batch : list List of samples to collate. "y" field is expected to be a SlidingWindowFeature. 
+ Returns ------- y : torch.Tensor @@ -414,13 +484,15 @@ def collate_y(self, batch) -> torch.Tensor: return torch.from_numpy(np.stack(collated_y)) + def segmentation_loss( self, permutated_prediction: torch.Tensor, target: torch.Tensor, - weight: torch.Tensor = None, + weight: Optional[torch.Tensor] = None, ) -> torch.Tensor: """Permutation-invariant segmentation loss + Parameters ---------- permutated_prediction : (batch_size, num_frames, num_classes) torch.Tensor @@ -429,11 +501,13 @@ def segmentation_loss( Speaker activity. weight : (batch_size, num_frames, 1) torch.Tensor, optional Frames weight. + Returns ------- seg_loss : torch.Tensor Permutation-invariant segmentation loss """ + if self.specifications.powerset: # `clamp_min` is needed to set non-speech weight to 1. class_weight = ( @@ -441,27 +515,23 @@ def segmentation_loss( if self.weigh_by_cardinality else None ) - - seg_loss = nll_loss( - permutated_prediction, - torch.argmax(target, dim=-1), - class_weight=class_weight, - weight=weight, - ) + seg_loss = nll_loss(permutated_prediction, torch.argmax(target, dim=-1)) else: seg_loss = binary_cross_entropy( permutated_prediction, target.float(), weight=weight ) return seg_loss + def voice_activity_detection_loss( self, permutated_prediction: torch.Tensor, target: torch.Tensor, - weight: torch.Tensor = None, + weight: Optional[torch.Tensor] = None, ) -> torch.Tensor: """Voice activity detection loss + Parameters ---------- permutated_prediction : (batch_size, num_frames, num_classes) torch.Tensor @@ -470,6 +540,7 @@ def voice_activity_detection_loss( Speaker activity. weight : (batch_size, num_frames, 1) torch.Tensor, optional Frames weight. + Returns ------- vad_loss : torch.Tensor @@ -886,7 +957,7 @@ def main(protocol: str, subset: str = "test", model: str = "pyannote/segmentatio main_task = progress.add_task(protocol.name, total=len(files)) file_task = progress.add_task("Processing", total=1.0) - def progress_hook(completed: int = None, total: int = None): + def progress_hook(completed: Optional[int] = None, total: Optional[int] = None): progress.update(file_task, completed=completed / total) inference = Inference(model, device=device) From 0c64b5ae6f5b7aaf6be3a0898f4f79ae6bd6ec8f Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Fri, 29 Mar 2024 14:07:57 +0100 Subject: [PATCH 22/23] add comments --- pyannote/audio/core/streaming_inference.py | 2 +- .../multilatency_speaker_diarization.py | 17 ++++++++--------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/pyannote/audio/core/streaming_inference.py b/pyannote/audio/core/streaming_inference.py index 9063a8c58..1a8bfcdcf 100644 --- a/pyannote/audio/core/streaming_inference.py +++ b/pyannote/audio/core/streaming_inference.py @@ -24,7 +24,7 @@ import warnings from pathlib import Path from typing import Callable, List, Optional, Text, Tuple, Union - +from pyannote.audio import Inference import numpy as np import torch import torch.nn as nn diff --git a/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py b/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py index aa35c505e..0cba0a47f 100644 --- a/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py +++ b/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py @@ -593,10 +593,12 @@ def training_step(self, batch, batch_idx: int): return None # forward pass + # tensor of size (batch_size, num_frames, num_speakers * K) where K is the number of latencies predictions = self.model(waveform) num_classes_powerset 
= predictions.size(2) //len(self.latency_list) seg_loss = 0 for k in range(len(self.latency_list)): + # select one latency at a time prediction = predictions[:,:,k*num_classes_powerset:k*num_classes_powerset+num_classes_powerset] batch_size, num_frames, _ = prediction.shape # (batch_size, num_frames, num_classes) @@ -615,17 +617,12 @@ def training_step(self, batch, batch_idx: int): warm_up_right = round(self.warm_up[1] / self.duration * num_frames) weight[:, num_frames - warm_up_right :] = 0.0 + # shift prediction and target delay = int(np.floor(num_frames * (self.latency_list[k]) / self.duration)) # round down - prediction = prediction[:, delay:, :] target = target[:, :num_frames-delay, :] - #future - # prediction = prediction[:, :num_frames-delay, :] - # target = target[:, delay:, :] - - - + # compute loss (all losses are added, there are K losses) if self.specifications.powerset: multilabel = self.model.powerset.to_multilabel(prediction) permutated_target, _ = permutate(multilabel, target) @@ -738,10 +735,12 @@ def validation_step(self, batch, batch_idx: int): # target = target[keep] # forward pass + # tensor of size (batch_size, num_frames, num_speakers * K) where K is the number of latencies predictions = self.model(waveform) losses=[] num_classes_powerset = predictions.size(2) //len(self.latency_list) for k in range(len(self.latency_list)): + # select one latency prediction = predictions[:,:,k*num_classes_powerset:k*num_classes_powerset+num_classes_powerset] batch_size, num_frames, _ = prediction.shape @@ -759,12 +758,12 @@ def validation_step(self, batch, batch_idx: int): warm_up_right = round(self.warm_up[1] / self.duration * num_frames) weight[:, num_frames - warm_up_right :] = 0.0 + # shift prediction and target delay = int(np.floor(num_frames * (self.latency_list[k]) / self.duration)) # round down - prediction = prediction[:, delay:, :] reference = target[:, :num_frames-delay, :] - #future + # future # prediction = prediction[:, :num_frames-delay, :] # target = target[:, delay:, :] From 0cac2b1e4aba8b1b3056d7923a35ebcc619114f3 Mon Sep 17 00:00:00 2001 From: Bilal RAHOU Date: Fri, 29 Mar 2024 15:54:56 +0100 Subject: [PATCH 23/23] add again the possibility to train a model with negative latency, and the validation DER is now calculated on the first latency (instead of the last) --- .../multilatency_speaker_diarization.py | 33 +++++++++++-------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py b/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py index 0cba0a47f..57fe6c2d6 100644 --- a/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py +++ b/pyannote/audio/tasks/segmentation/multilatency_speaker_diarization.py @@ -618,11 +618,16 @@ def training_step(self, batch, batch_idx: int): weight[:, num_frames - warm_up_right :] = 0.0 # shift prediction and target - delay = int(np.floor(num_frames * (self.latency_list[k]) / self.duration)) # round down - prediction = prediction[:, delay:, :] - target = target[:, :num_frames-delay, :] + if self.latency_list[k] >= 0: + delay = int(np.floor(num_frames * (self.latency_list[k]) / self.duration)) # round down + prediction = prediction[:, delay:, :] + target = target[:, :num_frames-delay, :] + else: + delay = int(np.floor(num_frames * (-1.0 * self.latency_list[k]) / self.duration)) # round down + prediction = prediction[:, :num_frames-delay, :] + target = target[:, 
delay:, :] + + #compute loss (all losses are added, there are K losses) if self.specifications.powerset: multilabel = self.model.powerset.to_multilabel(prediction) permutated_target, _ = permutate(multilabel, target) @@ -759,13 +764,14 @@ def validation_step(self, batch, batch_idx: int): weight[:, num_frames - warm_up_right :] = 0.0 # shift prediction and target - delay = int(np.floor(num_frames * (self.latency_list[k]) / self.duration)) # round down - prediction = prediction[:, delay:, :] - reference = target[:, :num_frames-delay, :] - - # future - # prediction = prediction[:, :num_frames-delay, :] - # target = target[:, delay:, :] + if self.latency_list[k] >= 0: + delay = int(np.floor(num_frames * (self.latency_list[k]) / self.duration)) # round down + prediction = prediction[:, delay:, :] + reference = target[:, :num_frames-delay, :] + else: + delay = int(np.floor(num_frames * (-1.0 * self.latency_list[k]) / self.duration)) # round down + prediction = prediction[:, :num_frames-delay, :] + reference = target[:, delay:, :] if self.specifications.powerset: multilabel = self.model.powerset.to_multilabel(prediction) @@ -786,7 +792,8 @@ def validation_step(self, batch, batch_idx: int): permutated_prediction, reference, weight=weight )) - target = target[:, :num_frames-delay, :] + # with the following line, the validation DER is calculated on the first latency prediction + multilabel = self.model.powerset.to_multilabel(predictions[:,:,:num_classes_powerset]) seg_loss = torch.sum(torch.tensor(losses))
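To make the latency handling in patches 22 and 23 above easier to follow, here is a minimal standalone sketch of the prediction/target alignment they implement. It is distilled from the hunks above and is not part of the patch series itself; the function name align_for_latency and its signature are illustrative assumptions, and only the slicing logic mirrors the training_step and validation_step code.

import numpy as np
import torch
from typing import Tuple


def align_for_latency(prediction: torch.Tensor,
                      target: torch.Tensor,
                      latency: float,
                      duration: float) -> Tuple[torch.Tensor, torch.Tensor]:
    # Illustrative helper (not part of the patch).
    # prediction and target have shape (batch_size, num_frames, num_classes);
    # latency is in seconds (possibly negative), duration is the chunk duration in seconds.
    num_frames = prediction.shape[1]
    delay = int(np.floor(num_frames * abs(latency) / duration))  # round down, as in the patch
    if latency >= 0:
        # positive latency: the output at frame t + delay is scored against the target at
        # frame t, i.e. the model gets `delay` extra frames of context before committing.
        return prediction[:, delay:, :], target[:, :num_frames - delay, :]
    # negative latency: the output at frame t is scored against the target at frame t + delay,
    # i.e. the model has to anticipate speaker activity `delay` frames ahead.
    return prediction[:, :num_frames - delay, :], target[:, delay:, :]

In the task code above, this alignment is applied once per entry of latency_list and the resulting per-latency losses are summed; the validation DER is then computed on the first latency only.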