Copy a token from your Hugging Face tokens page and paste it below. Immediately click login after copying your token or it might be stored in plain text in this notebook file.
+ }
+ },
+ "b2be65e192384c948fb8987d4cfca505": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "ba18cded436e486da34882d821d8f1eb": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "ButtonModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ButtonModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ButtonView",
+ "button_style": "",
+ "description": "Login",
+ "disabled": false,
+ "icon": "",
+ "layout": "IPY_MODEL_0e382d66f09f4958a40baa7ab83c4ccb",
+ "style": "IPY_MODEL_6a45ce374e2e47ba9457d02e02522748",
+ "tooltip": ""
+ }
+ },
+ "c8731777ce834e58a76a295076200cfc": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "VBoxModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "VBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "VBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_859b12a6d95b4c6f987791ca848122b9",
+ "IPY_MODEL_94756148d2e94a93ae233baba20af683",
+ "IPY_MODEL_ba18cded436e486da34882d821d8f1eb",
+ "IPY_MODEL_99898e6ee64a46bd832af112e79b58b7"
+ ],
+ "layout": "IPY_MODEL_79184c8c2a6f4b7493bb7f6983f18a09"
+ }
+ },
+ "ea95ffd922c0455d957120f034e541f8": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ }
+ }
}
- }
- }
- },
- "nbformat": 4,
- "nbformat_minor": 1
-}
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
\ No newline at end of file
From 54ddfa362e7a0e3ef523b6570c318c0d4d676991 Mon Sep 17 00:00:00 2001
From: Hervé BREDIN
Date: Thu, 16 Nov 2023 20:44:06 +0100
Subject: [PATCH 21/57] doc: add progress bar hook
---
tutorials/intro.ipynb | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/tutorials/intro.ipynb b/tutorials/intro.ipynb
index 2df5081c3..2aaa911f7 100644
--- a/tutorials/intro.ipynb
+++ b/tutorials/intro.ipynb
@@ -429,8 +429,10 @@
"if torch.cuda.is_available():\n",
" pipeline.to(torch.device('cuda'))\n",
"\n",
- "# run the pipeline\n",
- "diarization = pipeline(DEMO_FILE)"
+ "# run the pipeline (with progress bar)\n",
+ "from pyannote.audio.pipelines.utils.hook import ProgressHook\n",
+ "with ProgressHook() as hook:\n",
+ " diarization = pipeline(DEMO_FILE, hook=hook)"
]
},
{
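The progress-bar addition in PATCH 21 amounts to wrapping the pipeline call in a ProgressHook context manager. For reference, a minimal end-to-end sketch of the tutorial cell after this patch (the audio file name and the HF_TOKEN placeholder are assumptions; the model name and from_pretrained usage follow the README's TL;DR):

    import torch
    from pyannote.audio import Pipeline
    from pyannote.audio.pipelines.utils.hook import ProgressHook

    # load the pretrained pipeline (token placeholder is an assumption)
    pipeline = Pipeline.from_pretrained(
        "pyannote/speaker-diarization-3.1",
        use_auth_token="HF_TOKEN",
    )

    # send the pipeline to GPU when available, same guard as in the tutorial
    if torch.cuda.is_available():
        pipeline.to(torch.device("cuda"))

    # run the pipeline with a per-step progress bar
    with ProgressHook() as hook:
        diarization = pipeline("audio.wav", hook=hook)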
From 1882ff17683e8380ba4cb20e796385753595e48d Mon Sep 17 00:00:00 2001
From: Ohad Hen
Date: Fri, 17 Nov 2023 10:25:01 +0200
Subject: [PATCH 22/57] doc(setup): update ipython (8.10.0) and Sphinx (3.0.4)
(#1391)
https://security.snyk.io/package/pip/Sphinx/2.2.2
https://security.snyk.io/package/pip/ipython/7.16.3
---
doc/requirements.txt | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/doc/requirements.txt b/doc/requirements.txt
index a0b596dbc..5377da241 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -1,4 +1,4 @@
-ipython==7.16.3
+ipython==8.10.0
recommonmark
-Sphinx==2.2.2
+Sphinx==3.0.4
sphinx_rtd_theme==0.4.3
From 28b5531cec35c70b7f0353a502061dcd1dd11e1d Mon Sep 17 00:00:00 2001
From: Hervé BREDIN
Date: Fri, 24 Nov 2023 12:36:30 +0100
Subject: [PATCH 23/57] doc: add code of conduct (#1560)
---
CODE_OF_CONDUCT.md | 128 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 128 insertions(+)
create mode 100644 CODE_OF_CONDUCT.md
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..b53ae3b44
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,128 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+ overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+ advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+ address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+herve.bredin@irit.fr.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or
+permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.0, available at
+https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by [Mozilla's code of conduct
+enforcement ladder](https://github.com/mozilla/diversity).
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see the FAQ at
+https://www.contributor-covenant.org/faq. Translations are available at
+https://www.contributor-covenant.org/translations.
From b4ed44bb23717c794b71ac086d397f64471bb83a Mon Sep 17 00:00:00 2001
From: Hervé BREDIN
Date: Fri, 1 Dec 2023 14:09:32 +0100
Subject: [PATCH 24/57] fix(pipeline): fix support for setting `num_speakers`
in diarization pipeline
---
CHANGELOG.md | 8 ++++++++
pyannote/audio/pipelines/clustering.py | 9 ++++++++-
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 346d8ad26..3e0a93dbe 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,14 @@
## `develop` branch
+### TL;DR
+
+Providing `num_speakers` to [`pyannote/speaker-diarization-3.1`](https://hf.co/pyannote/speaker-diarization-3.1) now [works as expected](https://github.com/pyannote/pyannote-audio/issues/1567).
+
+### Fixes
+
+- fix(pipeline): fix support for setting `num_speakers` in [`pyannote/speaker-diarization-3.1`](https://hf.co/pyannote/speaker-diarization-3.1) pipeline
+
## Version 3.1.0 (2023-11-16)
### TL;DR
diff --git a/pyannote/audio/pipelines/clustering.py b/pyannote/audio/pipelines/clustering.py
index b63ab214f..80098ea24 100644
--- a/pyannote/audio/pipelines/clustering.py
+++ b/pyannote/audio/pipelines/clustering.py
@@ -97,7 +97,13 @@ def filter_embeddings(
speaker_idx : (num_embeddings, ) array
"""
- chunk_idx, speaker_idx = np.where(~np.any(np.isnan(embeddings), axis=2))
+ # whether speaker is active
+ active = np.sum(segmentations.data, axis=1) > 0
+ # whether speaker embedding extraction went fine
+ valid = ~np.any(np.isnan(embeddings), axis=2)
+
+ # indices of embeddings that are both active and valid
+ chunk_idx, speaker_idx = np.where(active * valid)
# sample max_num_embeddings embeddings
num_embeddings = len(chunk_idx)
@@ -240,6 +246,7 @@ def __call__(
)
num_embeddings, _ = train_embeddings.shape
+
num_clusters, min_clusters, max_clusters = self.set_num_clusters(
num_embeddings,
num_clusters=num_clusters,
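The core of this fix is that candidate embeddings are now filtered on speaker activity in addition to NaN-validity before clustering, which is what makes the requested num_speakers reachable. A standalone numpy sketch of that filter (array shapes are illustrative assumptions; in the pipeline, segmentations.data plays the role of segmentation_data, and the patch writes the combination as active * valid, which is equivalent to & for boolean arrays):

    import numpy as np

    # toy shapes: 2 chunks, 5 frames per chunk, 3 local speakers, 4-dim embeddings
    embeddings = np.random.randn(2, 3, 4)
    embeddings[0, 1] = np.nan          # embedding extraction failed here
    segmentation_data = np.random.rand(2, 5, 3)
    segmentation_data[1, :, 2] = 0.0   # speaker never active in this chunk

    # whether each (chunk, speaker) pair is active at all
    active = np.sum(segmentation_data, axis=1) > 0
    # whether speaker embedding extraction went fine (no NaN component)
    valid = ~np.any(np.isnan(embeddings), axis=2)

    # keep only embeddings that are both active and valid
    chunk_idx, speaker_idx = np.where(active & valid)
    print(list(zip(chunk_idx, speaker_idx)))  # (0, 1) and (1, 2) are filtered out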
From 1a8f619924794f2edf96d87994ff9d9a25ba1d6c Mon Sep 17 00:00:00 2001
From: Hervé BREDIN
Date: Fri, 1 Dec 2023 14:17:43 +0100
Subject: [PATCH 25/57] doc: getting ready for 3.1.1
---
README.md | 65 ++++++++++++++++++++++---------------------
tutorials/intro.ipynb | 22 +++++++--------
version.txt | 2 +-
3 files changed, 45 insertions(+), 44 deletions(-)
diff --git a/README.md b/README.md
index b7621210e..a82a2488f 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-Using `pyannote.audio` open-source toolkit in production?
+Using `pyannote.audio` open-source toolkit in production?
Make the most of it thanks to our [consulting services](https://herve.niderb.fr/consulting.html).
# `pyannote.audio` speaker diarization toolkit
@@ -9,15 +9,13 @@ Make the most of it thanks to our [consulting services](https://herve.niderb.fr/
-
## TL;DR
-1. Install [`pyannote.audio`](https://github.com/pyannote/pyannote-audio) `3.1` with `pip install pyannote.audio`
+1. Install [`pyannote.audio`](https://github.com/pyannote/pyannote-audio) with `pip install pyannote.audio`
2. Accept [`pyannote/segmentation-3.0`](https://hf.co/pyannote/segmentation-3.0) user conditions
3. Accept [`pyannote/speaker-diarization-3.1`](https://hf.co/pyannote/speaker-diarization-3.1) user conditions
4. Create access token at [`hf.co/settings/tokens`](https://hf.co/settings/tokens).
-
```python
from pyannote.audio import Pipeline
pipeline = Pipeline.from_pretrained(
@@ -47,50 +45,53 @@ for turn, _, speaker in diarization.itertracks(yield_label=True):
- :snake: Python-first API
- :zap: multi-GPU training with [pytorch-lightning](https://pytorchlightning.ai/)
-
## Documentation
- [Changelog](CHANGELOG.md)
- [Frequently asked questions](FAQ.md)
- Models
- - Available tasks explained
- - [Applying a pretrained model](tutorials/applying_a_model.ipynb)
- - [Training, fine-tuning, and transfer learning](tutorials/training_a_model.ipynb)
+ - Available tasks explained
+ - [Applying a pretrained model](tutorials/applying_a_model.ipynb)
+ - [Training, fine-tuning, and transfer learning](tutorials/training_a_model.ipynb)
- Pipelines
- - Available pipelines explained
- - [Applying a pretrained pipeline](tutorials/applying_a_pipeline.ipynb)
- - [Adapting a pretrained pipeline to your own data](tutorials/adapting_pretrained_pipeline.ipynb)
- - [Training a pipeline](tutorials/voice_activity_detection.ipynb)
+ - Available pipelines explained
+ - [Applying a pretrained pipeline](tutorials/applying_a_pipeline.ipynb)
+ - [Adapting a pretrained pipeline to your own data](tutorials/adapting_pretrained_pipeline.ipynb)
+ - [Training a pipeline](tutorials/voice_activity_detection.ipynb)
- Contributing
- - [Adding a new model](tutorials/add_your_own_model.ipynb)
- - [Adding a new task](tutorials/add_your_own_task.ipynb)
- - Adding a new pipeline
- - Sharing pretrained models and pipelines
+ - [Adding a new model](tutorials/add_your_own_model.ipynb)
+ - [Adding a new task](tutorials/add_your_own_task.ipynb)
+ - Adding a new pipeline
+ - Sharing pretrained models and pipelines
- Blog
- - 2022-12-02 > ["How I reached 1st place at Ego4D 2022, 1st place at Albayzin 2022, and 6th place at VoxSRC 2022 speaker diarization challenges"](tutorials/adapting_pretrained_pipeline.ipynb)
- - 2022-10-23 > ["One speaker segmentation model to rule them all"](https://herve.niderb.fr/fastpages/2022/10/23/One-speaker-segmentation-model-to-rule-them-all)
- - 2021-08-05 > ["Streaming voice activity detection with pyannote.audio"](https://herve.niderb.fr/fastpages/2021/08/05/Streaming-voice-activity-detection-with-pyannote.html)
+ - 2022-12-02 > ["How I reached 1st place at Ego4D 2022, 1st place at Albayzin 2022, and 6th place at VoxSRC 2022 speaker diarization challenges"](tutorials/adapting_pretrained_pipeline.ipynb)
+ - 2022-10-23 > ["One speaker segmentation model to rule them all"](https://herve.niderb.fr/fastpages/2022/10/23/One-speaker-segmentation-model-to-rule-them-all)
+ - 2021-08-05 > ["Streaming voice activity detection with pyannote.audio"](https://herve.niderb.fr/fastpages/2021/08/05/Streaming-voice-activity-detection-with-pyannote.html)
- Videos
- [Introduction to speaker diarization](https://umotion.univ-lemans.fr/video/9513-speech-segmentation-and-speaker-diarization/) / JSALT 2023 summer school / 90 min
- [Speaker segmentation model](https://www.youtube.com/watch?v=wDH2rvkjymY) / Interspeech 2021 / 3 min
- - [First releaase of pyannote.audio](https://www.youtube.com/watch?v=37R_R82lfwA) / ICASSP 2020 / 8 min
+ - [First release of pyannote.audio](https://www.youtube.com/watch?v=37R_R82lfwA) / ICASSP 2020 / 8 min
## Benchmark
-Out of the box, `pyannote.audio` speaker diarization [pipeline](https://hf.co/pyannote/speaker-diarization-3.1) v3.1 is expected to be much better (and faster) than v2.x.
+Out of the box, `pyannote.audio` speaker diarization [pipeline](https://hf.co/pyannote/speaker-diarization-3.1) v3.1 is expected to be much better (and faster) than v2.x.
Those numbers are diarization error rates (in %):
-| Dataset \ Version | v1.1 | [v2.1](https://hf.co/pyannote/speaker-diarization-2.1) | [v3.1](https://hf.co/pyannote/speaker-diarization-3.1) | Premium |
-| ---------------------- | ---- | ----- | ------ | --------- |
-| AISHELL-4 | - | 14.1 | 12.2 | 12.3 |
-| AliMeeting (channel 1) | - | 27.4 | 24.4 | 19.4 |
-| AMI (IHM) | 29.7 | 18.9 | 18.8 | 16.7 |
-| AMI (SDM) | - | 27.1 | 22.4 | 20.1 |
-| AVA-AVD | - | - | 50.0 | 42.7 |
-| DIHARD 3 (full) | 29.2 | 26.9 | 21.7 | 17.0 |
-| MSDWild | - | - | 25.3 | 20.4 |
-| REPERE (phase2) | - | 8.2 | 7.8 | 7.8 |
-| VoxConverse (v0.3) | 21.5 | 11.2 | 11.3 | 9.5 |
+| Benchmark | [v2.1](https://hf.co/pyannote/speaker-diarization-2.1) | [v3.1](https://hf.co/pyannote/speaker-diarization-3.1) | [Premium](https://forms.gle/eKhn7H2zTa68sMMx8) |
+| ---------------------- | ------------------------------------------------------ | ------------------------------------------------------ | ---------------------------------------------- |
+| AISHELL-4 | 14.1 | 12.3 | 11.9 |
+| AliMeeting (channel 1) | 27.4 | 24.5 | 22.5 |
+| AMI (IHM) | 18.9 | 18.8 | 16.6 |
+| AMI (SDM) | 27.1 | 22.6 | 20.9 |
+| AVA-AVD | 66.3 | 50.0 | 39.8 |
+| CALLHOME (part 2) | 31.6 | 28.4 | 22.2 |
+| DIHARD 3 (full) | 26.9 | 21.4 | 17.2 |
+| Ego4D (dev.) | 61.5 | 51.2 | 43.8 |
+| MSDWild | 32.8 | 25.4 | 19.8 |
+| REPERE (phase2) | 8.2 | 7.8 | 7.6 |
+| VoxConverse (v0.3) | 11.2 | 11.2 | 9.4 |
+
+[Diarization error rate](http://pyannote.github.io/pyannote-metrics/reference.html#diarization) (in %)
## Citations
diff --git a/tutorials/intro.ipynb b/tutorials/intro.ipynb
index 2aaa911f7..572ea2f6d 100644
--- a/tutorials/intro.ipynb
+++ b/tutorials/intro.ipynb
@@ -3,8 +3,8 @@
{
"cell_type": "markdown",
"metadata": {
- "id": "view-in-github",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "view-in-github"
},
"source": [
""
@@ -53,7 +53,7 @@
},
"outputs": [],
"source": [
- "!pip install -qq pyannote.audio==3.1.0\n",
+ "!pip install -qq pyannote.audio==3.1.1\n",
"!pip install -qq ipython==7.34.0"
]
},
@@ -115,7 +115,7 @@
"outputs": [
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAl9UlEQVR4nO3de3RV5Z0//k8gEEJIjpAYAnJzitzECpWZqnTVajtKRwutq/WCIkjrZU3VTlupdrqsOi67rFOko1ZbrTq9UGnroF+trbd6RSq2CK1WRlFBRblowBAEApL9+4OfZ4xccjsnJzu8XmtlLbL3s5/97PPs8znn8M7ZuyhJkiQAAAAAAABSrFuhBwAAAAAAANBeAg8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2Bx27MmDEjioqKdvl56aWX9rhu0qRJ2e2HDRu22zZXXXVVts1rr70Wn/vc56KsrCyqqqriggsuiG3btmXXb926NWbMmBGHHHJIFBcXx+c///ldxrlgwYKYOHFiVFZWRmlpaYwaNSrmzJmT18cm7dIytxERDQ0N8Z3vfCeGDh0aJSUl8ZGPfCRuvfXWvD02aff+/J177rm7rPvXf/3XKCoqihkzZjRp21nm+n1PPvlkFBcXx7hx49r1WHR1uZ7riIiFCxfGv/zLv0Tfvn2jV69eccghh8Ts2bNjx44dTdpt2LAhpk2bFplMJjKZTEybNi3eeeedJm2+9rWvxWGHHRYlJSW7ncuVK1fudkz33Xdfux4XAAAAgH1dcSF2uuHdbc03yqG+ZT1bvc2kSZPitttua7Js//333+O6kpKSJr//x3/8R5x11llNlpWXl0dExI4dO+L444+P/fffPxYsWBC1tbUxffr0SJIkrrvuumyb0tLSuOCCC+J//ud/djvGsrKyOO+88+KjH/1olJWVxYIFC+Kcc86JsrKyOPvss1t9zLlQ11DXYfvKlGTatF0a5jYi4qSTToq1a9fGLbfcEsOHD49169bFe++916Zjbq8tdVs7dH+lmV5t2m7w4MExb968mDNnTpSWlkbEztDh9ttvjyFDhjRp25nmOiKirq4uzjjjjPj0pz8da9eubf3B58iO2toO3V/3yso2bZfLub7zzjvjpJNOijPPPDMeeeSR2G+//eKhhx6Kb33rW/HUU0/Fb37zmygqKoqIiKlTp8aqVauy4cTZZ58d06ZNi3vuuSfbX5IkMXPmzFi0aFH87W9/2+MxPPTQQ3HwwQdnf+/Xr1+bHgsAAAAAdipI4PHZqx/p0P09dflxrd6mpKQkampqWr3ufeXl5Xts88ADD8Tzzz8fr7/+egwcODAiImbPnh0zZsyIK6+8MioqKqKsrCxuvPHGiNj5V98f/gviiIjx48fH+PHjs78PGzYs5s+fH0888UTBAo9pf5jaYfu6+/P3tmm7NMztfffdF4899li88sor2f8EHTZsWAuPMPd+fsYdHbq/c/7f6W3a7mMf+1i88sorMX/+/DjttNMiImL+/PkxePDg+Id/+IcmbTvLXL/vnHPOialTp0b37t3jrrvuauER596aj47r0P0d8MbrbdouV3P97rvvxllnnRWTJ0+Om266Kbv8K1/5SvTv3z8mT54cv/nNb+Lkk0+OZcuWxX333RdPPfVUfPzjH4+IiJtvvjmOOOKIeOGFF2LkyJEREXHttddGRMRbb72118CjsrKy2XMQAAAAgJZzSasC+NOf/hRjx47N/idpRMRxxx0XDQ0NsXjx4jb3u2TJkli4cGEcddRRuRgmbZCrub377rtjwoQJcfXVV8cBBxwQI0aMiAsvvDC2bNmSj2F3KWeeeWaTv+a/9dZbY+bMmTnfTy6fx7fddlu8/PLLcemll+Z6mF1aLub6gQceiNra2rjwwgt3Wfe5z30uRowYEbfffntE7JzzTCaTDTsiIg4//PDIZDKxcOHCVo9/8uTJUV1dHRMnTow77ujYUBEAAACgKxJ47MHvfve76NOnT/bnS1/60h7X9enTJ6644oom21900UW7tHn00UcjImLNmjXRv3//Ju379u0bPXv2jDVr1rR6rIMGDYqSkpKYMGFCfPWrX42vfOUrrT/gfUga5vaVV16JBQsWxHPPPRd33nln/PCHP4w77rgjvvrVr7b9wPcR06ZNiwULFsTKlSvj1VdfjSeffDJOP33Xb4x0lrlevnx5XHzxxTF37twoLi7Il+5SKxdz/eKLL0ZExOjRo3e7j1GjRmXbrFmzJqqrq3dpU11d3ao579OnT1xzzTVxxx13xO9///v49Kc/HSeffHL88pe/bHEfAAAAAOzK/67twdFHH529FE3Ezvtl7GldxK7XXp81a1b2prnvO+CAA7L/fv968B+UJMlulzfniSeeiE2bNsVTTz0VF198cQwfPjxOPfXUVvezr0jD3DY2NkZRUVHMnTs3Mpmd9yq55ppr4otf/GL86Ec/yt6zgF1VVVXF8ccfHz/72c8iSZI4/vjjo6qqapd2nWGud+zYEVOnTo3LL788RowY0aJt+D+5nOskSXa7jw/PZy6e31VVVfH1r389+/uECRNiw4YNcfXVV+82sAEAAACgZQoSePzhW0cXYretUlZWFsOHD2/1uvdVVVXtsU1NTU0sWrSoybINGzbE9u3bd/mL8ZY48MADIyLikEMOibVr18Zll11WsMDjF5/9VUH22xppmNsBAwbEAQcckA07Inb+BXqSJLFq1ao46KCDWtxXLpzx8y926P7aa+bMmXHeeedFRMSPfvSj3bbpDHNdX18ff/nLX2LJkiXZ8TY2NkaSJFFcXBwPPPBAHHPMMS3qK1dq/ra0Q/fXXu2d6/eDpmXLlsWRRx65y/r//d//jTFjxkTEzjnf3Q3l33rrrTbV7g86/PDD46c//Wm7+gAAAADY1xUk8Ohb1rMQu+00jjjiiLjyyitj9erVMWDAgIjYeR35kpKSOOyww9rVd5Ik0dDQkIthtkmmJNN8oy4sV3M7ceLE+O1vfxubNm2KPn36RMTOS+9069YtBg0alJex701ppleH77M9Jk2aFNu2bYuInffVyIdczHVFRUU8++yzTZbdcMMN8fDDD8cdd9yRDTM7UvfKyg7fZ3u0d66PPfbY6NevX8yePXuXwOPuu++O5cuXZy9/dcQRR0RdXV08/fTT8U//9E8REbFo0aKoq6vbbVjSGkuWLMmeRwAAAAC0jUtatUFDQ8Mu12svLi5ucimV+vr6Xdr07t07Kioq4thjj40xY8bEtGnT4j//8z9j/fr1ceGFF8ZZZ50VFRUV2fbPP/98bNu2LdavXx/19fWxdOnSiIgYN25cROz8a+YhQ4bEqFGjIiJiwYIF8YMf/CDOP//8PBz1vqGzzO3UqVPjiiuuiDPPPDMuv/z
yePvtt2PWrFkxc+ZMl7Nqge7du8eyZcuy/96dzjDX3bp1i7Fjxzbpv7q6Onr16rXLcnavvXNdVlYWP/nJT+KUU06Js88+O84777yoqKiIP/7xjzFr1qz44he/GCeddFJE7PyW1aRJk+Kss86Kn/zkJxERcfbZZ8cJJ5wQI0eOzPb90ksvxaZNm2LNmjWxZcuW7JyPGTMmevbsGT/72c+iR48eMX78+OjWrVvcc889ce2118b3v//9XD88AAAAAPuWhF1Mnz49mTJlyh7XRcQuPyNHjsy2GTp06G7bnHPOOdk2r776anL88ccnpaWlSb9+/ZLzzjsv2bp1a5N97amf91177bXJwQcfnPTu3TupqKhIxo8fn9xwww3Jjh07cvuAdCFpmdskSZJly5Yln/nMZ5LS0tJk0KBByTe+8Y1k8+bNuXswupi9zW2SJMmUKVOS6dOnZ9t2prn+oEsvvTQ59NBD2/QY7CtyPddJkiSPP/54MmnSpCSTySQ9e/ZMxowZk/zgBz9I3nvvvSbtamtrk9NOOy0pLy9PysvLk9NOOy3ZsGFDkzZHHXXUbve5YsWKJEmS5L//+7+T0aNHJ717907Ky8uTww47LPnFL37R3ocFAAAAYJ9XlCR7uFMrAAAAAABASnQr9AAAAAAAAADaS+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6hXnq+PGxsZ48803o7y8PIqKivK1GwAAAAAAIAWSJIn6+voYOHBgdOuW++9j5C3wePPNN2Pw4MH56h4AAAAAAEih119/PQYNGpTzfvMWeJSXl0fEzoFXVFTkazcAAAAAAEAKbNy4MQYPHpzND3Itb4HH+5exqqioEHgAAAAAAAAREXm7DYablgMAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEi9vAce767fnO9dQJe0avUb8YPfzo5Vq98o9FDoYtZvXR+/WjY31m9dHxE76/Rfbv9rTut1PvpMsx1r18bG2dfEjrVrCz2UnOkKx5Sr8zRN5/uHn//tbQe5lo9z74N9vl3fEDc/8lK8Xd+Qs/7Zt3XkOaU2A+RPV6mxXeU4IrrWsdCx8h54bHlna753AV3Smto18XiPh2NN7ZpCD4UuZsPW9THvhV/Fhv//TcPmDVti8bxnY/OGLTnbRz76TLMd69ZF/TVzYse6dYUeSs50hWPK1XmapvP9w8//9raDXMvHuffBPt+ub4hbHn1Z4EHOdOQ5pTYD5E9XqbFd5Tgiutax0LFc0goAAAAAAEg9gQcAAAAAAJB6xfneQcO722JLnctaQWtt27w9IiLefe/dqGuoK/Bo6Eo2bdu02+UNm3JXrxs2bctJP11N4zt1saO2ttDDyInGd7pOXWrvuZ/G833Ttk17fW3ZU52AjtLcOdravj6sfsv22PBu+p67dD71W7Z3+D5z+fwAYKeu9v63K7xWdLU5oePkPfC4/8rHorRHab53A11OXeX6iMkRs1d+P2JloUfDvuDe7/6x0EPo8mpPObXQQ2A39sVz/5KF3yn0EGCv8n2Onv/zv+S1f8gnNRyA5nitYF/mklYAAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpl/d7eBz3naNi2Ngh+d4NdDlLXlgaC968P7457KIYN/rQQg+HLmRl3YrdXs/z+P/4dFQO65uTfdSu3LBP3hehOZXzbo8eY0YXehg5sf35ZV3mniTtPffTeL5fceSVMSxz4B7X76lOQEdp7hxtjd2dz9edMSGG15TnpH/2bS+tqe/we8Lk8vkBwE5d7f1vV3it6GpzQsfJe+BRUtYzSjO98r0b6HJ69u4RERFlxWWRKckUeDR0JX169tnt8pI+uavXJX165qSfrqbbfpnoXllZ6GHkxI79uk5dau+5n8bzvU/PPnt9bdlTnYCO0tw52tq+Pqy8tEf0LUvfc5fOp7y0R4fvM5fPDwB26mrvf7vCa0VXmxM6jktaAQAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASL28Bx6l+7lhObRFTWVNfHL7MVFTWVPoodDF9O3VL04ZOTX69uoXERG9+5bGYaccEr37luZsH/noM826V1dH+Te+Ht2rqws9lJzpCseUq/M0Tef7h5//7W0HuZaPc++DfVaVl8SXP/WRqCovyVn/7Ns68pxSmwHyp6vU2K5yHBFd61joWEVJkiT56Hjjxo2RyWSirq4uKioq8rELAAAAAAAgJfKdG7ikFQAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfw6ALWb10fv1o2N9ZvXd8p+sm33Y2zvWN/u74hbn7kpXi7viEn7XIlX/vr6OMg/8xpunWW+ess4wA6H/WBQmnJ+/y2fBbYsXZtbJx9TexYu7ZN42rv9sBOrXl98VrUsQrxeJvj3POY0hnV5vl8FHh0ARu2ro95L/wqNrQzqMhVP/m2u3G2d+xv1zfELY++3KLAoyXtciVf++vo4yD/zGm6dZb56yzjADof9YFCacn7/LZ8Ftixbl3UXzMndqxb16ZxtXd7YKfWvL54LepYhXi8zXHueUzpjGo3CTwAAAAAAAD2SuABAAAAAACknsADAAAAAABIveJCD4Dc2bRtU9Q11LVr+zT54PHmauz1W7bHhne37XV9ITQ3rrb0R9eU63OFjtHZnpPOI+DDOludYt+zt8867fks0PhOXeyorW3TdkDutOT9p9eiwujIzwbmOH98xqMzqd/yXl77F3h0IZcs/E6hh9Ch8nG85//8LznvMxc667jofJwr5ILzCIDOJl+fdWpPOTUv/QKt4/1n52VuugbzSGfyXsO7ee3fJa0AAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABSzz08upArjrwyhmUObPP2K+tWpOo+IB883lyN/bozJsTwmvI9rn9pTX1BrnvY3Lhaq1DHQf7l+lyhY3S256TzCPiwzlan2Pfs7bNOez4LVM67PXqMGd3q7bY/v8z9PyCHWvL+02tRYXTkZwNznD8+49
GZLF3+Zhzz/fz1L/DoQvr07BOZkky7tk+TDx5vrsZeXtoj+pb13Ov6QmhuXG3pj64p1+cKHaOzPSedR8CHdbY6xb5nb5912vNZoNt+meheWdnq7Xbs1/bPXcCuWvL+02tRYXTkZwNznD8+49GZlJfmN5JwSSsAAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3h0AX179YtTRk6Nvr36dYp+8m1342zv2KvKS+LLn/pIVJWX5KRdruRrfx19HOSfOU23zjJ/nWUcQOejPlAoLXmf35bPAt2rq6P8G1+P7tXVbRpXe7cHdmrN64vXoo5ViMfbHOeex5TOqLJPfs/HoiRJknx0vHHjxshkMlFXVxcVFRX52AUAAAAAAJAS+c4NfMMDAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/DYh7xd3xA3P/JSvF3f0Kb1AEDE+q3r41fL5sb6retbtBwAAADoGAKPfcjb9Q1xy6Mv7zXw2Nt6ACBiw9b1Me+FX8WGDwUbe1oOAAAAdAyBBwAAAAAAkHoCDwAAAAAAIPWKCz0AOl79lu2x4d1tu10OALTMpm2boq6hrsnvAAAAQOEIPPZB5//8L4UeAgCk3iULv1PoIQAAAAAf4JJWAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqeceHvug686YEMNryndZ/tKaevf3AIAWuuLIK2NY5sDs7yvrVrivBwAAABSQwGMfVF7aI/qW9dztcgCgZfr07BOZkkyT3wEAAIDCcUkrAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfw2IdUlZfElz/1kagqL2nTegAgom+vfnHKyKnRt1e/Fi0HAAAAOkZRkiRJPjreuHFjZDKZqKuri4qKinzsAgAAAAAASIl85wa+4QEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1ivPVcZIkERGxcePGfO0CAAAAAABIiffzgvfzg1zLW+BRW1sbERGDBw/O1y4AAAAAAICUqa2tjUwmk/N+8xZ49OvXLyIiXnvttbwMHEiHjRs3xuDBg+P111+PioqKQg8HKAB1AFAHgAi1AFAHgIi6uroYMmRINj/ItbwFHt267bw9SCaTUcCAqKioUAtgH6cOAOoAEKEWAOoA8H/5Qc77zUuvAAAAAAAAHUjgAQAAAAAApF7eAo+SkpK49NJLo6SkJF+7AFJALQDUAUAdACLUAkAdAPJfB4qSJEny0jMAAAAAAEAHcUkrAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqdfqwOONN96I008/PSorK6N3794xbty4WLx4cXZ9kiRx2WWXxcCBA6O0tDQ+9alPxd///vcmfTQ0NMT5558fVVVVUVZWFpMnT45Vq1a1/2iADtFcHZg/f34cd9xxUVVVFUVFRbF06dJd+lAHIP32Vgu2b98eF110URxyyCFRVlYWAwcOjDPOOCPefPPNJn2oBZBuzb0nuOyyy2LUqFFRVlYWffv2jc985jOxaNGiJn2oA5BuzdWBDzrnnHOiqKgofvjDHzZZrg5A+jVXC2bMmBFFRUVNfg4//PAmfagFkG4teU+wbNmymDx5cmQymSgvL4/DDz88Xnvttez6XNSBVgUeGzZsiIkTJ0aPHj3iD3/4Qzz//PMxe/bs2G+//bJtrr766rjmmmvi+uuvjz//+c9RU1MT//zP/xz19fXZNv/2b/8Wd955Z8ybNy8WLFgQmzZtihNOOCF27NjRqsEDHa8ldeDdd9+NiRMnxlVXXbXHftQBSLfmasHmzZvjmWeeiUsuuSSeeeaZmD9/frz44osxefLkJv2oBZBeLXlPMGLEiLj++uvj2WefjQULFsSwYcPi2GOPjbfeeivbRh2A9GpJHXjfXXfdFYsWLYqBAwfusk4dgHRraS2YNGlSrF69Ovvz+9//vsl6tQDSqyV14OWXX45PfOITMWrUqHj00Ufjr3/9a1xyySXRq1evbJuc1IGkFS666KLkE5/4xB7XNzY2JjU1NclVV12VXbZ169Ykk8kkP/7xj5MkSZJ33nkn6dGjRzJv3rxsmzfeeCPp1q1bct9997VmOEABNFcHPmjFihVJRCRLlixpslwdgPRrTS1439NPP51ERPLqq68mSaIWQNq1pQ7U1dUlEZE89NBDSZKoA5B2La0Dq1atSg444IDkueeeS4YOHZrMmTMnu04dgPRrSS2YPn16MmXKlD2uVwsg3VpSB04++eTk9NNP3+P6XNWBVn3D4+67744JEybEl770paiuro7x48fHzTffnF2/YsWKWLNmTRx77LHZZSUlJXHUUUfFwoULIyJi8eLFsX379iZtBg4cGGPHjs22ATqv5upAS6gDkH5tqQV1dXVRVFSU/QsPtQDSrbV1YNu2bXHTTTdFJpOJQw89NCLUAUi7ltSBxsbGmDZtWsyaNSsOPvjgXfpQByD9Wvqe4NFHH43q6uoYMWJEnHXWWbFu3brsOrUA0q25OtDY2Bj33ntvjBgxIo477riorq6Oj3/843HXXXdl2+SqDrQq8HjllVfixhtvjIMOOijuv//+OPfcc+OCCy6In//85xERsWbNmoiI6N+/f5Pt+vfvn123Zs2a6NmzZ/Tt23ePbYDOq7k60BLqAKRfa2vB1q1b4+KLL46pU6dGRUVFRKgFkHYtrQO/+93vok+fPtGrV6+YM2dOPPjgg1FVVRUR6gCkXUvqwPe///0oLi6OCy64YLd9qAOQfi2pBZ/97Gdj7ty58fDDD8fs2bPjz3/+cxxzzDHR0NAQEWoBpF1zdWDdunWxadOmuOqqq2LSpEnxwAMPxBe+8IU48cQT47HHHouI3NWB4tYMvLGxMSZMmBDf+973IiJi/Pjx8fe//z1uvPHGOOOMM7LtioqKmmyXJMkuyz6sJW2AwmtpHWgLdQDSozW1YPv27XHKKadEY2Nj3HDDDc32rRZAOrS0Dhx99NGxdOnSePvtt+Pmm2+Ok046KRYtWhTV1dV77Fsdg
HRorg4sXrw4/uu//iueeeaZVj+n1QFIj5a8Jzj55JOz7ceOHRsTJkyIoUOHxr333hsnnnjiHvtWCyAdmqsDjY2NERExZcqU+PrXvx4REePGjYuFCxfGj3/84zjqqKP22Hdr60CrvuExYMCAGDNmTJNlo0ePzt5JvaamJiJil8Rl3bp12W991NTUxLZt22LDhg17bAN0Xs3VgZZQByD9WloLtm/fHieddFKsWLEiHnzwwey3OyLUAki7ltaBsrKyGD58eBx++OFxyy23RHFxcdxyyy0RoQ5A2jVXB5544olYt25dDBkyJIqLi6O4uDheffXV+OY3vxnDhg2LCHUAuoK2/D/BgAEDYujQobF8+fKIUAsg7ZqrA1VVVVFcXNxstpCLOtCqwGPixInxwgsvNFn24osvxtChQyMi4sADD4yampp48MEHs+u3bdsWjz32WBx55JEREXHYYYdFjx49mrRZvXp1PPfcc9k2QOfVXB1oCXUA0q8lteD9sGP58uXx0EMPRWVlZZP2agGkW1vfEyRJkr18hToA6dZcHZg2bVr87W9/i6VLl2Z/Bg4cGLNmzYr7778/ItQB6Ara8p6gtrY2Xn/99RgwYEBEqAWQds3VgZ49e8Y//uM/7rVNzupAi29vniTJ008/nRQXFydXXnllsnz58mTu3LlJ7969k1/+8pfZNldddVWSyWSS+fPnJ88++2xy6qmnJgMGDEg2btyYbXPuuecmgwYNSh566KHkmWeeSY455pjk0EMPTd57773WDAcogJbUgdra2mTJkiXJvffem0REMm/evGTJkiXJ6tWrs23UAUi35mrB9u3bk8mTJyeDBg1Kli5dmqxevTr709DQkO1HLYD0aq4ObNq0Kfn2t7+d/OlPf0pWrlyZLF68OPnyl7+clJSUJM8991y2H3UA0qslnw0+bOjQocmcOXOaLFMHIN2aqwX19fXJN7/5zWThwoXJihUrkkceeSQ54ogjkgMOOMD/F0IX0ZL3BPPnz0969OiR3HTTTcny5cuT6667LunevXvyxBNPZNvkog60KvBIkiS55557krFjxyYlJSXJqFGjkptuuqnJ+sbGxuTSSy9NampqkpKSkuSTn/xk8uyzzzZps2XLluS8885L+vXrl5SWliYnnHBC8tprr7V2KECBNFcHbrvttiQidvm59NJLs23UAUi/vdWCFStW7LYORETyyCOPZNupBZBue6sDW7ZsSb7whS8kAwcOTHr27JkMGDAgmTx5cvL000836UMdgHRr7rPBh+0u8FAHIP32Vgs2b96cHHvsscn++++f9OjRIxkyZEgyffr0XZ7nagGkW0veE9xyyy3J8OHDk169eiWHHnpoctdddzVZn4s6UJQkSdKKb6cAAAAAAAB0Oq26hwcAAAAAAEBnJPAAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AACAdrvsssti3LhxhR4GAACwDytKkiQp9CAAAIDOq6ioaK/rp0+fHtdff300NDREZWVlB40KAACgKYEHAACwV2vWrMn++9e//nV897vfjRdeeCG7rLS0NDKZTCGGBgAAkOWSVgAAwF7V1NRkfzKZTBQVFe2y7MOXtJoxY0Z8/vOfj+9973vRv3//2G+//eLyyy+P9957L2bNmhX9+vWLQYMGxa233tpkX2+88UacfPLJ0bdv36isrIwpU6bEypUrO/aAAQCAVBJ4AAAAefHwww/Hm2++GY8//nhcc801cdlll8UJJ5wQffv2jUWLFsW5554b5557brz++usREbF58+Y4+uijo0+fPvH444/HggULok+fPjFp0qTYtm1bgY8GAADo7AQeAABAXvTr1y+uvfbaGDlyZMycOTNGjhwZmzdvjn//93+Pgw46KL797W9Hz54948knn4yIiHnz5kW3bt3ipz/9aRxyyCExevTouO222+K1116LRx99tLAHAwAAdHrFhR4AAADQNR188MHRrdv//Y1V//79Y+zYsdnfu3fvHpWVlbFu3bqIiFi8eHG89NJLUV5e3qSfrVu3xssvv9wxgwYAAFJL4AEAAORFjx49mvxeVFS022WNjY0REdHY2BiHHXZYzJ07d5e+9t9///wNFAAA6BIEHgAAQKfwsY99LH79619HdXV1VFRUFHo4AABAyriHBwAA0CmcdtppUVVVFVOmTIknnngiVqxYEY899lh87Wtfi1WrVhV6eAAAQCcn8AAAADqF3r17x+OPPx5DhgyJE088MUaPHh0zZ86MLVu2+MYHAADQrKIkSZJCDwIAAAAAAKA9fMMDAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOr9fw+gShyFf/1LAAAAAElFTkSuQmCC\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAl9UlEQVR4nO3de3RV5Z0//k8gEEJIjpAYAnJzitzECpWZqnTVajtKRwutq/WCIkjrZU3VTlupdrqsOi67rFOko1ZbrTq9UGnroF+trbd6RSq2CK1WRlFBRblowBAEApL9+4OfZ4xccjsnJzu8XmtlLbL3s5/97PPs8znn8M7ZuyhJkiQAAAAAAABSrFuhBwAAAAAAANBeAg8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2Bx27MmDEjioqKdvl56aWX9rhu0qRJ2e2HDRu22zZXXXVVts1rr70Wn/vc56KsrCyqqqriggsuiG3btmXXb926NWbMmBGHHHJIFBcXx+c///ldxrlgwYKYOHFiVFZWRmlpaYwaNSrmzJmT18cm7dIytxERDQ0N8Z3vfCeGDh0aJSUl8ZGPfCRuvfXWvD02aff+/J177rm7rPvXf/3XKCoqihkzZjRp21nm+n1PPvlkFBcXx7hx49r1WHR1uZ7riIiFCxfGv/zLv0Tfvn2jV69eccghh8Ts2bNjx44dTdpt2LAhpk2bFplMJjKZTEybNi3eeeedJm2+9rWvxWGHHRYlJSW7ncuVK1fudkz33Xdfux4XAAAAgH1dcSF2uuHdbc03yqG+ZT1bvc2kSZPitttua7Js//333+O6kpKSJr//x3/8R5x11llNlpWXl0dExI4dO+L444+P/fffPxYsWBC1tbUxffr0SJIkrrvuumyb0tLSuOCCC+J//ud/djvGsrKyOO+88+KjH/1olJWVxYIFC+Kcc86JsrKyOPvss1t9zLlQ11DXYfvKlGTatF0a5jYi4qSTToq1a9fGLbfcEsOHD49169bFe++916Zjbq8tdVs7dH+lmV5t2m7w4MExb968mDNnTpSWlkbEztDh9ttvjyFDhjRp25nmOiKirq4uzjjjjPj0pz8da9eubf3B58iO2toO3V/3yso2bZfLub7zzjvjpJNOijPPPDMeeeSR2G+//eKhhx6Kb33rW/HUU0/Fb37zmygqKoqIiKlTp8aqVauy4cTZZ58d06ZNi3vuuSfbX5IkMXPmzFi0aFH87W9/2+MxPPTQQ3HwwQdnf+/Xr1+bHgsAAAAAdipI4PHZqx/p0P09dflxrd6mpKQkampqWr3ufeXl5Xts88ADD8Tzzz8fr7/+egwcODAiImbPnh0zZsyIK6+8MioqKqKsrCxuvPHGiNj5V98f/gviiIjx48fH+PHjs78PGzYs5s+fH0888UTBAo9pf5jaYfu6+/P3tmm7NMztfffdF4899li88sor2f8EHTZsWAuPMPd+fsYdHbq/c/7f6W3a7mMf+1i88sorMX/+/DjttNMiImL+/PkxePDg+Id/+IcmbTvLXL/vnHPOialTp0b37t3jrrvuauER596aj47r0P0d8MbrbdouV3P97rvvxllnnRWTJ0+Om266Kbv8K1/5SvTv3z8mT54cv/nNb+Lkk0+OZcuWxX333RdPPfVUfPzjH4+IiJtvvjmOOOKIeOGFF2LkyJEREXHttddGRMRbb72118CjsrKy2XMQAAAAgJZzSasC+NOf/hRjx47N/idpRMRxxx0XDQ0NsXjx4jb3u2TJkli4cGEcddRRuRgmbZCrub377rtjwoQJcfXVV8cBBxwQI0aMiAsvvDC2bNmSj2F3KWeeeWaTv+a/9dZbY+bMmTnfTy6fx7fddlu8/PLLcemll+Z6mF1aLub6gQceiNra2rjwwgt3Wfe5z30uRowYEbfffntE7JzzTCaTDTsiIg4//PDIZDKxcOHCVo9/8uTJUV1dHRMnTow77ujYUBEAAACgKxJ47MHvfve76NOnT/bnS1/60h7X9enTJ6644oom21900UW7tHn00UcjImLNmjXRv3//Ju379u0bPXv2jDVr1rR6rIMGDYqSkpKYMGFCfPWrX42vfOUrrT/gfUga5vaVV16JBQsWxHPPPRd33nln/PCHP4w77rgjvvrVr7b9wPcR06ZNiwULFsTKlSvj1VdfjSeffDJOP33Xb4x0lrlevnx5XHzxxTF37twoLi7Il+5SKxdz/eKLL0ZExOjRo3e7j1GjRmXbrFmzJqqrq3dpU11d3ao579OnT1xzzTVxxx13xO9///v49Kc/HSeffHL88pe/bHEfAAAAAOzK/67twdFHH529FE3Ezvtl7GldxK7XXp81a1b2prnvO+CAA7L/fv968B+UJMlulzfniSeeiE2bNsVTTz0VF198cQwfPjxOPfXUVvezr0jD3DY2NkZRUVHMnTs3Mpmd9yq55ppr4otf/GL86Ec/yt6zgF1VVVXF8ccfHz/72c8iSZI4/vjjo6qqapd2nWGud+zYEVOnTo3LL788RowY0aJt+D+5nOskSXa7jw/PZy6e31VVVfH1r389+/uECRNiw4YNcfXVV+82sAEAAACgZQoSePzhW0cXYretUlZWFsOHD2/1uvdVVVXtsU1NTU0sWrSoybINGzbE9u3bd/mL8ZY48MADIyLikEMOibVr18Zll11WsMDjF5/9VUH22xppmNsBAwbEAQcckA07Inb+BXqSJLFq1ao46KCDWtxXLpzx8y926P7aa+bMmXHeeedFRMSPfvSj3bbpDHNdX18ff/nLX2LJkiXZ8TY2NkaSJFFcXBwPPPBAHHPMMS3qK1dq/ra0Q/fXXu2d6/eDpmXLlsWRRx65y/r//d//jTFjxkTEzjnf3Q3l33rrrTbV7g86/PDD46c//Wm7+gAAAADY1xUk8Ohb1rMQu+00jjjiiLjyyitj9erVMWDAgIjYeR35kpKSOOyww9rVd5Ik0dDQkIthtkmmJNN8oy4sV3M7ceLE+O1vfxubNm2KPn36RMTOS+9069YtBg0alJex701ppleH77M9Jk2aFNu2bYuInffVyIdczHVFRUU8++yzTZbdcMMN8fDDD8cdd9yRDTM7UvfKyg7fZ3u0d66PPfbY6NevX8yePXuXwOPuu++O5cuXZy9/dcQRR0RdXV08/fTT8U//9E8REbFo0aKoq6vbbVjSGkuWLMmeRwAAAAC0jUtatUFDQ8Mu12svLi5ucimV+vr6Xdr07t07Kioq4thjj40xY8bEtGnT4j//8z9j/fr1ceGFF8ZZZ50VFRUV2fbPP/98bNu2LdavXx/19fWxdOnSiIgYN25cROz8a+YhQ4bEqFGjIiJiwYIF8YMf/CDOP//8PBz1vqGzzO3UqVPjiiuuiDPPPDMuv/z
yePvtt2PWrFkxc+ZMl7Nqge7du8eyZcuy/96dzjDX3bp1i7Fjxzbpv7q6Onr16rXLcnavvXNdVlYWP/nJT+KUU06Js88+O84777yoqKiIP/7xjzFr1qz44he/GCeddFJE7PyW1aRJk+Kss86Kn/zkJxERcfbZZ8cJJ5wQI0eOzPb90ksvxaZNm2LNmjWxZcuW7JyPGTMmevbsGT/72c+iR48eMX78+OjWrVvcc889ce2118b3v//9XD88AAAAAPuWhF1Mnz49mTJlyh7XRcQuPyNHjsy2GTp06G7bnHPOOdk2r776anL88ccnpaWlSb9+/ZLzzjsv2bp1a5N97amf91177bXJwQcfnPTu3TupqKhIxo8fn9xwww3Jjh07cvuAdCFpmdskSZJly5Yln/nMZ5LS0tJk0KBByTe+8Y1k8+bNuXswupi9zW2SJMmUKVOS6dOnZ9t2prn+oEsvvTQ59NBD2/QY7CtyPddJkiSPP/54MmnSpCSTySQ9e/ZMxowZk/zgBz9I3nvvvSbtamtrk9NOOy0pLy9PysvLk9NOOy3ZsGFDkzZHHXXUbve5YsWKJEmS5L//+7+T0aNHJ717907Ky8uTww47LPnFL37R3ocFAAAAYJ9XlCR7uFMrAAAAAABASnQr9AAAAAAAAADaS+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6hXnq+PGxsZ48803o7y8PIqKivK1GwAAAAAAIAWSJIn6+voYOHBgdOuW++9j5C3wePPNN2Pw4MH56h4AAAAAAEih119/PQYNGpTzfvMWeJSXl0fEzoFXVFTkazcAAAAAAEAKbNy4MQYPHpzND3Itb4HH+5exqqioEHgAAAAAAAAREXm7DYablgMAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEi9vAce767fnO9dQJe0avUb8YPfzo5Vq98o9FDoYtZvXR+/WjY31m9dHxE76/Rfbv9rTut1PvpMsx1r18bG2dfEjrVrCz2UnOkKx5Sr8zRN5/uHn//tbQe5lo9z74N9vl3fEDc/8lK8Xd+Qs/7Zt3XkOaU2A+RPV6mxXeU4IrrWsdCx8h54bHlna753AV3Smto18XiPh2NN7ZpCD4UuZsPW9THvhV/Fhv//TcPmDVti8bxnY/OGLTnbRz76TLMd69ZF/TVzYse6dYUeSs50hWPK1XmapvP9w8//9raDXMvHuffBPt+ub4hbHn1Z4EHOdOQ5pTYD5E9XqbFd5Tgiutax0LFc0goAAAAAAEg9gQcAAAAAAJB6xfneQcO722JLnctaQWtt27w9IiLefe/dqGuoK/Bo6Eo2bdu02+UNm3JXrxs2bctJP11N4zt1saO2ttDDyInGd7pOXWrvuZ/G833Ttk17fW3ZU52AjtLcOdravj6sfsv22PBu+p67dD71W7Z3+D5z+fwAYKeu9v63K7xWdLU5oePkPfC4/8rHorRHab53A11OXeX6iMkRs1d+P2JloUfDvuDe7/6x0EPo8mpPObXQQ2A39sVz/5KF3yn0EGCv8n2Onv/zv+S1f8gnNRyA5nitYF/mklYAAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpl/d7eBz3naNi2Ngh+d4NdDlLXlgaC968P7457KIYN/rQQg+HLmRl3YrdXs/z+P/4dFQO65uTfdSu3LBP3hehOZXzbo8eY0YXehg5sf35ZV3mniTtPffTeL5fceSVMSxz4B7X76lOQEdp7hxtjd2dz9edMSGG15TnpH/2bS+tqe/we8Lk8vkBwE5d7f1vV3it6GpzQsfJe+BRUtYzSjO98r0b6HJ69u4RERFlxWWRKckUeDR0JX169tnt8pI+uavXJX165qSfrqbbfpnoXllZ6GHkxI79uk5dau+5n8bzvU/PPnt9bdlTnYCO0tw52tq+Pqy8tEf0LUvfc5fOp7y0R4fvM5fPDwB26mrvf7vCa0VXmxM6jktaAQAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASL28Bx6l+7lhObRFTWVNfHL7MVFTWVPoodDF9O3VL04ZOTX69uoXERG9+5bGYaccEr37luZsH/noM826V1dH+Te+Ht2rqws9lJzpCseUq/M0Tef7h5//7W0HuZaPc++DfVaVl8SXP/WRqCovyVn/7Ns68pxSmwHyp6vU2K5yHBFd61joWEVJkiT56Hjjxo2RyWSirq4uKioq8rELAAAAAAAgJfKdG7ikFQAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfw6ALWb10fv1o2N9ZvXd8p+sm33Y2zvWN/u74hbn7kpXi7viEn7XIlX/vr6OMg/8xpunWW+ess4wA6H/WBQmnJ+/y2fBbYsXZtbJx9TexYu7ZN42rv9sBOrXl98VrUsQrxeJvj3POY0hnV5vl8FHh0ARu2ro95L/wqNrQzqMhVP/m2u3G2d+xv1zfELY++3KLAoyXtciVf++vo4yD/zGm6dZb56yzjADof9YFCacn7/LZ8Ftixbl3UXzMndqxb16ZxtXd7YKfWvL54LepYhXi8zXHueUzpjGo3CTwAAAAAAAD2SuABAAAAAACknsADAAAAAABIveJCD4Dc2bRtU9Q11LVr+zT54PHmauz1W7bHhne37XV9ITQ3rrb0R9eU63OFjtHZnpPOI+DDOludYt+zt8867fks0PhOXeyorW3TdkDutOT9p9eiwujIzwbmOH98xqMzqd/yXl77F3h0IZcs/E6hh9Ch8nG85//8LznvMxc667jofJwr5ILzCIDOJl+fdWpPOTUv/QKt4/1n52VuugbzSGfyXsO7ee3fJa0AAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABSzz08upArjrwyhmUObPP2K+tWpOo+IB883lyN/bozJsTwmvI9rn9pTX1BrnvY3Lhaq1DHQf7l+lyhY3S256TzCPiwzlan2Pfs7bNOez4LVM67PXqMGd3q7bY/v8z9PyCHWvL+02tRYXTkZwNznD8+49
GZLF3+Zhzz/fz1L/DoQvr07BOZkky7tk+TDx5vrsZeXtoj+pb13Ov6QmhuXG3pj64p1+cKHaOzPSedR8CHdbY6xb5nb5912vNZoNt+meheWdnq7Xbs1/bPXcCuWvL+02tRYXTkZwNznD8+49GZlJfmN5JwSSsAAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3h0AX179YtTRk6Nvr36dYp+8m1342zv2KvKS+LLn/pIVJWX5KRdruRrfx19HOSfOU23zjJ/nWUcQOejPlAoLXmf35bPAt2rq6P8G1+P7tXVbRpXe7cHdmrN64vXoo5ViMfbHOeex5TOqLJPfs/HoiRJknx0vHHjxshkMlFXVxcVFRX52AUAAAAAAJAS+c4NfMMDAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/DYh7xd3xA3P/JSvF3f0Kb1AEDE+q3r41fL5sb6retbtBwAAADoGAKPfcjb9Q1xy6Mv7zXw2Nt6ACBiw9b1Me+FX8WGDwUbe1oOAAAAdAyBBwAAAAAAkHoCDwAAAAAAIPWKCz0AOl79lu2x4d1tu10OALTMpm2boq6hrsnvAAAAQOEIPPZB5//8L4UeAgCk3iULv1PoIQAAAAAf4JJWAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqeceHvug686YEMNryndZ/tKaevf3AIAWuuLIK2NY5sDs7yvrVrivBwAAABSQwGMfVF7aI/qW9dztcgCgZfr07BOZkkyT3wEAAIDCcUkrAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfw2IdUlZfElz/1kagqL2nTegAgom+vfnHKyKnRt1e/Fi0HAAAAOkZRkiRJPjreuHFjZDKZqKuri4qKinzsAgAAAAAASIl85wa+4QEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1ivPVcZIkERGxcePGfO0CAAAAAABIiffzgvfzg1zLW+BRW1sbERGDBw/O1y4AAAAAAICUqa2tjUwmk/N+8xZ49OvXLyIiXnvttbwMHEiHjRs3xuDBg+P111+PioqKQg8HKAB1AFAHgAi1AFAHgIi6uroYMmRINj/ItbwFHt267bw9SCaTUcCAqKioUAtgH6cOAOoAEKEWAOoA8H/5Qc77zUuvAAAAAAAAHUjgAQAAAAAApF7eAo+SkpK49NJLo6SkJF+7AFJALQDUAUAdACLUAkAdAPJfB4qSJEny0jMAAAAAAEAHcUkrAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqdfqwOONN96I008/PSorK6N3794xbty4WLx4cXZ9kiRx2WWXxcCBA6O0tDQ+9alPxd///vcmfTQ0NMT5558fVVVVUVZWFpMnT45Vq1a1/2iADtFcHZg/f34cd9xxUVVVFUVFRbF06dJd+lAHIP32Vgu2b98eF110URxyyCFRVlYWAwcOjDPOOCPefPPNJn2oBZBuzb0nuOyyy2LUqFFRVlYWffv2jc985jOxaNGiJn2oA5BuzdWBDzrnnHOiqKgofvjDHzZZrg5A+jVXC2bMmBFFRUVNfg4//PAmfagFkG4teU+wbNmymDx5cmQymSgvL4/DDz88Xnvttez6XNSBVgUeGzZsiIkTJ0aPHj3iD3/4Qzz//PMxe/bs2G+//bJtrr766rjmmmvi+uuvjz//+c9RU1MT//zP/xz19fXZNv/2b/8Wd955Z8ybNy8WLFgQmzZtihNOOCF27NjRqsEDHa8ldeDdd9+NiRMnxlVXXbXHftQBSLfmasHmzZvjmWeeiUsuuSSeeeaZmD9/frz44osxefLkJv2oBZBeLXlPMGLEiLj++uvj2WefjQULFsSwYcPi2GOPjbfeeivbRh2A9GpJHXjfXXfdFYsWLYqBAwfusk4dgHRraS2YNGlSrF69Ovvz+9//vsl6tQDSqyV14OWXX45PfOITMWrUqHj00Ufjr3/9a1xyySXRq1evbJuc1IGkFS666KLkE5/4xB7XNzY2JjU1NclVV12VXbZ169Ykk8kkP/7xj5MkSZJ33nkn6dGjRzJv3rxsmzfeeCPp1q1bct9997VmOEABNFcHPmjFihVJRCRLlixpslwdgPRrTS1439NPP51ERPLqq68mSaIWQNq1pQ7U1dUlEZE89NBDSZKoA5B2La0Dq1atSg444IDkueeeS4YOHZrMmTMnu04dgPRrSS2YPn16MmXKlD2uVwsg3VpSB04++eTk9NNP3+P6XNWBVn3D4+67744JEybEl770paiuro7x48fHzTffnF2/YsWKWLNmTRx77LHZZSUlJXHUUUfFwoULIyJi8eLFsX379iZtBg4cGGPHjs22ATqv5upAS6gDkH5tqQV1dXVRVFSU/QsPtQDSrbV1YNu2bXHTTTdFJpOJQw89NCLUAUi7ltSBxsbGmDZtWsyaNSsOPvjgXfpQByD9Wvqe4NFHH43q6uoYMWJEnHXWWbFu3brsOrUA0q25OtDY2Bj33ntvjBgxIo477riorq6Oj3/843HXXXdl2+SqDrQq8HjllVfixhtvjIMOOijuv//+OPfcc+OCCy6In//85xERsWbNmoiI6N+/f5Pt+vfvn123Zs2a6NmzZ/Tt23ePbYDOq7k60BLqAKRfa2vB1q1b4+KLL46pU6dGRUVFRKgFkHYtrQO/+93vok+fPtGrV6+YM2dOPPjgg1FVVRUR6gCkXUvqwPe///0oLi6OCy64YLd9qAOQfi2pBZ/97Gdj7ty58fDDD8fs2bPjz3/+cxxzzDHR0NAQEWoBpF1zdWDdunWxadOmuOqqq2LSpEnxwAMPxBe+8IU48cQT47HHHouI3NWB4tYMvLGxMSZMmBDf+973IiJi/Pjx8fe//z1uvPHGOOOMM7LtioqKmmyXJMkuyz6sJW2AwmtpHWgLdQDSozW1YPv27XHKKadEY2Nj3HDDDc32rRZAOrS0Dhx99NGxdOnSePvtt+Pmm2+Ok046KRYtWhTV1dV77Fsdg
HRorg4sXrw4/uu//iueeeaZVj+n1QFIj5a8Jzj55JOz7ceOHRsTJkyIoUOHxr333hsnnnjiHvtWCyAdmqsDjY2NERExZcqU+PrXvx4REePGjYuFCxfGj3/84zjqqKP22Hdr60CrvuExYMCAGDNmTJNlo0ePzt5JvaamJiJil8Rl3bp12W991NTUxLZt22LDhg17bAN0Xs3VgZZQByD9WloLtm/fHieddFKsWLEiHnzwwey3OyLUAki7ltaBsrKyGD58eBx++OFxyy23RHFxcdxyyy0RoQ5A2jVXB5544olYt25dDBkyJIqLi6O4uDheffXV+OY3vxnDhg2LCHUAuoK2/D/BgAEDYujQobF8+fKIUAsg7ZqrA1VVVVFcXNxstpCLOtCqwGPixInxwgsvNFn24osvxtChQyMi4sADD4yampp48MEHs+u3bdsWjz32WBx55JEREXHYYYdFjx49mrRZvXp1PPfcc9k2QOfVXB1oCXUA0q8lteD9sGP58uXx0EMPRWVlZZP2agGkW1vfEyRJkr18hToA6dZcHZg2bVr87W9/i6VLl2Z/Bg4cGLNmzYr7778/ItQB6Ara8p6gtrY2Xn/99RgwYEBEqAWQds3VgZ49e8Y//uM/7rVNzupAi29vniTJ008/nRQXFydXXnllsnz58mTu3LlJ7969k1/+8pfZNldddVWSyWSS+fPnJ88++2xy6qmnJgMGDEg2btyYbXPuuecmgwYNSh566KHkmWeeSY455pjk0EMPTd57773WDAcogJbUgdra2mTJkiXJvffem0REMm/evGTJkiXJ6tWrs23UAUi35mrB9u3bk8mTJyeDBg1Kli5dmqxevTr709DQkO1HLYD0aq4ObNq0Kfn2t7+d/OlPf0pWrlyZLF68OPnyl7+clJSUJM8991y2H3UA0qslnw0+bOjQocmcOXOaLFMHIN2aqwX19fXJN7/5zWThwoXJihUrkkceeSQ54ogjkgMOOMD/F0IX0ZL3BPPnz0969OiR3HTTTcny5cuT6667LunevXvyxBNPZNvkog60KvBIkiS55557krFjxyYlJSXJqFGjkptuuqnJ+sbGxuTSSy9NampqkpKSkuSTn/xk8uyzzzZps2XLluS8885L+vXrl5SWliYnnHBC8tprr7V2KECBNFcHbrvttiQidvm59NJLs23UAUi/vdWCFStW7LYORETyyCOPZNupBZBue6sDW7ZsSb7whS8kAwcOTHr27JkMGDAgmTx5cvL000836UMdgHRr7rPBh+0u8FAHIP32Vgs2b96cHHvsscn++++f9OjRIxkyZEgyffr0XZ7nagGkW0veE9xyyy3J8OHDk169eiWHHnpoctdddzVZn4s6UJQkSdKKb6cAAAAAAAB0Oq26hwcAAAAAAEBnJPAAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AACAdrvsssti3LhxhR4GAACwDytKkiQp9CAAAIDOq6ioaK/rp0+fHtdff300NDREZWVlB40KAACgKYEHAACwV2vWrMn++9e//nV897vfjRdeeCG7rLS0NDKZTCGGBgAAkOWSVgAAwF7V1NRkfzKZTBQVFe2y7MOXtJoxY0Z8/vOfj+9973vRv3//2G+//eLyyy+P9957L2bNmhX9+vWLQYMGxa233tpkX2+88UacfPLJ0bdv36isrIwpU6bEypUrO/aAAQCAVBJ4AAAAefHwww/Hm2++GY8//nhcc801cdlll8UJJ5wQffv2jUWLFsW5554b5557brz++usREbF58+Y4+uijo0+fPvH444/HggULok+fPjFp0qTYtm1bgY8GAADo7AQeAABAXvTr1y+uvfbaGDlyZMycOTNGjhwZmzdvjn//93+Pgw46KL797W9Hz54948knn4yIiHnz5kW3bt3ipz/9aRxyyCExevTouO222+K1116LRx99tLAHAwAAdHrFhR4AAADQNR188MHRrdv//Y1V//79Y+zYsdnfu3fvHpWVlbFu3bqIiFi8eHG89NJLUV5e3qSfrVu3xssvv9wxgwYAAFJL4AEAAORFjx49mvxeVFS022WNjY0REdHY2BiHHXZYzJ07d5e+9t9///wNFAAA6BIEHgAAQKfwsY99LH79619HdXV1VFRUFHo4AABAyriHBwAA0CmcdtppUVVVFVOmTIknnngiVqxYEY899lh87Wtfi1WrVhV6eAAAQCcn8AAAADqF3r17x+OPPx5DhgyJE088MUaPHh0zZ86MLVu2+MYHAADQrKIkSZJCDwIAAAAAAKA9fMMDAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOr9fw+gShyFf/1LAAAAAElFTkSuQmCC",
"text/plain": [
""
]
@@ -157,7 +157,7 @@
"outputs": [
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAl9UlEQVR4nO3de3RV5Z0//k8gEEJIjpAYAnJzitzECpWZqnTVajtKRwutq/WCIkjrZU3VTlupdrqsOi67rFOko1ZbrTq9UGnroF+trbd6RSq2CK1WRlFBRblowBAEApL9+4OfZ4xccjsnJzu8XmtlLbL3s5/97PPs8znn8M7ZuyhJkiQAAAAAAABSrFuhBwAAAAAAANBeAg8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2Bx27MmDEjioqKdvl56aWX9rhu0qRJ2e2HDRu22zZXXXVVts1rr70Wn/vc56KsrCyqqqriggsuiG3btmXXb926NWbMmBGHHHJIFBcXx+c///ldxrlgwYKYOHFiVFZWRmlpaYwaNSrmzJmT18cm7dIytxERDQ0N8Z3vfCeGDh0aJSUl8ZGPfCRuvfXWvD02aff+/J177rm7rPvXf/3XKCoqihkzZjRp21nm+n1PPvlkFBcXx7hx49r1WHR1uZ7riIiFCxfGv/zLv0Tfvn2jV69eccghh8Ts2bNjx44dTdpt2LAhpk2bFplMJjKZTEybNi3eeeedJm2+9rWvxWGHHRYlJSW7ncuVK1fudkz33Xdfux4XAAAAgH1dcSF2uuHdbc03yqG+ZT1bvc2kSZPitttua7Js//333+O6kpKSJr//x3/8R5x11llNlpWXl0dExI4dO+L444+P/fffPxYsWBC1tbUxffr0SJIkrrvuumyb0tLSuOCCC+J//ud/djvGsrKyOO+88+KjH/1olJWVxYIFC+Kcc86JsrKyOPvss1t9zLlQ11DXYfvKlGTatF0a5jYi4qSTToq1a9fGLbfcEsOHD49169bFe++916Zjbq8tdVs7dH+lmV5t2m7w4MExb968mDNnTpSWlkbEztDh9ttvjyFDhjRp25nmOiKirq4uzjjjjPj0pz8da9eubf3B58iO2toO3V/3yso2bZfLub7zzjvjpJNOijPPPDMeeeSR2G+//eKhhx6Kb33rW/HUU0/Fb37zmygqKoqIiKlTp8aqVauy4cTZZ58d06ZNi3vuuSfbX5IkMXPmzFi0aFH87W9/2+MxPPTQQ3HwwQdnf+/Xr1+bHgsAAAAAdipI4PHZqx/p0P09dflxrd6mpKQkampqWr3ufeXl5Xts88ADD8Tzzz8fr7/+egwcODAiImbPnh0zZsyIK6+8MioqKqKsrCxuvPHGiNj5V98f/gviiIjx48fH+PHjs78PGzYs5s+fH0888UTBAo9pf5jaYfu6+/P3tmm7NMztfffdF4899li88sor2f8EHTZsWAuPMPd+fsYdHbq/c/7f6W3a7mMf+1i88sorMX/+/DjttNMiImL+/PkxePDg+Id/+IcmbTvLXL/vnHPOialTp0b37t3jrrvuauER596aj47r0P0d8MbrbdouV3P97rvvxllnnRWTJ0+Om266Kbv8K1/5SvTv3z8mT54cv/nNb+Lkk0+OZcuWxX333RdPPfVUfPzjH4+IiJtvvjmOOOKIeOGFF2LkyJEREXHttddGRMRbb72118CjsrKy2XMQAAAAgJZzSasC+NOf/hRjx47N/idpRMRxxx0XDQ0NsXjx4jb3u2TJkli4cGEcddRRuRgmbZCrub377rtjwoQJcfXVV8cBBxwQI0aMiAsvvDC2bNmSj2F3KWeeeWaTv+a/9dZbY+bMmTnfTy6fx7fddlu8/PLLcemll+Z6mF1aLub6gQceiNra2rjwwgt3Wfe5z30uRowYEbfffntE7JzzTCaTDTsiIg4//PDIZDKxcOHCVo9/8uTJUV1dHRMnTow77ujYUBEAAACgKxJ47MHvfve76NOnT/bnS1/60h7X9enTJ6644oom21900UW7tHn00UcjImLNmjXRv3//Ju379u0bPXv2jDVr1rR6rIMGDYqSkpKYMGFCfPWrX42vfOUrrT/gfUga5vaVV16JBQsWxHPPPRd33nln/PCHP4w77rgjvvrVr7b9wPcR06ZNiwULFsTKlSvj1VdfjSeffDJOP33Xb4x0lrlevnx5XHzxxTF37twoLi7Il+5SKxdz/eKLL0ZExOjRo3e7j1GjRmXbrFmzJqqrq3dpU11d3ao579OnT1xzzTVxxx13xO9///v49Kc/HSeffHL88pe/bHEfAAAAAOzK/67twdFHH529FE3Ezvtl7GldxK7XXp81a1b2prnvO+CAA7L/fv968B+UJMlulzfniSeeiE2bNsVTTz0VF198cQwfPjxOPfXUVvezr0jD3DY2NkZRUVHMnTs3Mpmd9yq55ppr4otf/GL86Ec/yt6zgF1VVVXF8ccfHz/72c8iSZI4/vjjo6qqapd2nWGud+zYEVOnTo3LL788RowY0aJt+D+5nOskSXa7jw/PZy6e31VVVfH1r389+/uECRNiw4YNcfXVV+82sAEAAACgZQoSePzhW0cXYretUlZWFsOHD2/1uvdVVVXtsU1NTU0sWrSoybINGzbE9u3bd/mL8ZY48MADIyLikEMOibVr18Zll11WsMDjF5/9VUH22xppmNsBAwbEAQcckA07Inb+BXqSJLFq1ao46KCDWtxXLpzx8y926P7aa+bMmXHeeedFRMSPfvSj3bbpDHNdX18ff/nLX2LJkiXZ8TY2NkaSJFFcXBwPPPBAHHPMMS3qK1dq/ra0Q/fXXu2d6/eDpmXLlsWRRx65y/r//d//jTFjxkTEzjnf3Q3l33rrrTbV7g86/PDD46c//Wm7+gAAAADY1xUk8Ohb1rMQu+00jjjiiLjyyitj9erVMWDAgIjYeR35kpKSOOyww9rVd5Ik0dDQkIthtkmmJNN8oy4sV3M7ceLE+O1vfxubNm2KPn36RMTOS+9069YtBg0alJex701ppleH77M9Jk2aFNu2bYuInffVyIdczHVFRUU8++yzTZbdcMMN8fDDD8cdd9yRDTM7UvfKyg7fZ3u0d66PPfbY6NevX8yePXuXwOPuu++O5cuXZy9/dcQRR0RdXV08/fTT8U//9E8REbFo0aKoq6vbbVjSGkuWLMmeRwAAAAC0jUtatUFDQ8Mu12svLi5ucimV+vr6Xdr07t07Kioq4thjj40xY8bEtGnT4j//8z9j/fr1ceGFF8ZZZ50VFRUV2fbPP/98bNu2LdavXx/19fWxdOnSiIgYN25cROz8a+YhQ4bEqFGjIiJiwYIF8YMf/CDOP//8PBz1vqGzzO3UqVPjiiuuiDPPPDMuv/z
yePvtt2PWrFkxc+ZMl7Nqge7du8eyZcuy/96dzjDX3bp1i7Fjxzbpv7q6Onr16rXLcnavvXNdVlYWP/nJT+KUU06Js88+O84777yoqKiIP/7xjzFr1qz44he/GCeddFJE7PyW1aRJk+Kss86Kn/zkJxERcfbZZ8cJJ5wQI0eOzPb90ksvxaZNm2LNmjWxZcuW7JyPGTMmevbsGT/72c+iR48eMX78+OjWrVvcc889ce2118b3v//9XD88AAAAAPuWhF1Mnz49mTJlyh7XRcQuPyNHjsy2GTp06G7bnHPOOdk2r776anL88ccnpaWlSb9+/ZLzzjsv2bp1a5N97amf91177bXJwQcfnPTu3TupqKhIxo8fn9xwww3Jjh07cvuAdCFpmdskSZJly5Yln/nMZ5LS0tJk0KBByTe+8Y1k8+bNuXswupi9zW2SJMmUKVOS6dOnZ9t2prn+oEsvvTQ59NBD2/QY7CtyPddJkiSPP/54MmnSpCSTySQ9e/ZMxowZk/zgBz9I3nvvvSbtamtrk9NOOy0pLy9PysvLk9NOOy3ZsGFDkzZHHXXUbve5YsWKJEmS5L//+7+T0aNHJ717907Ky8uTww47LPnFL37R3ocFAAAAYJ9XlCR7uFMrAAAAAABASnQr9AAAAAAAAADaS+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6hXnq+PGxsZ48803o7y8PIqKivK1GwAAAAAAIAWSJIn6+voYOHBgdOuW++9j5C3wePPNN2Pw4MH56h4AAAAAAEih119/PQYNGpTzfvMWeJSXl0fEzoFXVFTkazcAAAAAAEAKbNy4MQYPHpzND3Itb4HH+5exqqioEHgAAAAAAAAREXm7DYablgMAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEi9vAce767fnO9dQJe0avUb8YPfzo5Vq98o9FDoYtZvXR+/WjY31m9dHxE76/Rfbv9rTut1PvpMsx1r18bG2dfEjrVrCz2UnOkKx5Sr8zRN5/uHn//tbQe5lo9z74N9vl3fEDc/8lK8Xd+Qs/7Zt3XkOaU2A+RPV6mxXeU4IrrWsdCx8h54bHlna753AV3Smto18XiPh2NN7ZpCD4UuZsPW9THvhV/Fhv//TcPmDVti8bxnY/OGLTnbRz76TLMd69ZF/TVzYse6dYUeSs50hWPK1XmapvP9w8//9raDXMvHuffBPt+ub4hbHn1Z4EHOdOQ5pTYD5E9XqbFd5Tgiutax0LFc0goAAAAAAEg9gQcAAAAAAJB6xfneQcO722JLnctaQWtt27w9IiLefe/dqGuoK/Bo6Eo2bdu02+UNm3JXrxs2bctJP11N4zt1saO2ttDDyInGd7pOXWrvuZ/G833Ttk17fW3ZU52AjtLcOdravj6sfsv22PBu+p67dD71W7Z3+D5z+fwAYKeu9v63K7xWdLU5oePkPfC4/8rHorRHab53A11OXeX6iMkRs1d+P2JloUfDvuDe7/6x0EPo8mpPObXQQ2A39sVz/5KF3yn0EGCv8n2Onv/zv+S1f8gnNRyA5nitYF/mklYAAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpl/d7eBz3naNi2Ngh+d4NdDlLXlgaC968P7457KIYN/rQQg+HLmRl3YrdXs/z+P/4dFQO65uTfdSu3LBP3hehOZXzbo8eY0YXehg5sf35ZV3mniTtPffTeL5fceSVMSxz4B7X76lOQEdp7hxtjd2dz9edMSGG15TnpH/2bS+tqe/we8Lk8vkBwE5d7f1vV3it6GpzQsfJe+BRUtYzSjO98r0b6HJ69u4RERFlxWWRKckUeDR0JX169tnt8pI+uavXJX165qSfrqbbfpnoXllZ6GHkxI79uk5dau+5n8bzvU/PPnt9bdlTnYCO0tw52tq+Pqy8tEf0LUvfc5fOp7y0R4fvM5fPDwB26mrvf7vCa0VXmxM6jktaAQAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASL28Bx6l+7lhObRFTWVNfHL7MVFTWVPoodDF9O3VL04ZOTX69uoXERG9+5bGYaccEr37luZsH/noM826V1dH+Te+Ht2rqws9lJzpCseUq/M0Tef7h5//7W0HuZaPc++DfVaVl8SXP/WRqCovyVn/7Ns68pxSmwHyp6vU2K5yHBFd61joWEVJkiT56Hjjxo2RyWSirq4uKioq8rELAAAAAAAgJfKdG7ikFQAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfw6ALWb10fv1o2N9ZvXd8p+sm33Y2zvWN/u74hbn7kpXi7viEn7XIlX/vr6OMg/8xpunWW+ess4wA6H/WBQmnJ+/y2fBbYsXZtbJx9TexYu7ZN42rv9sBOrXl98VrUsQrxeJvj3POY0hnV5vl8FHh0ARu2ro95L/wqNrQzqMhVP/m2u3G2d+xv1zfELY++3KLAoyXtciVf++vo4yD/zGm6dZb56yzjADof9YFCacn7/LZ8Ftixbl3UXzMndqxb16ZxtXd7YKfWvL54LepYhXi8zXHueUzpjGo3CTwAAAAAAAD2SuABAAAAAACknsADAAAAAABIveJCD4Dc2bRtU9Q11LVr+zT54PHmauz1W7bHhne37XV9ITQ3rrb0R9eU63OFjtHZnpPOI+DDOludYt+zt8867fks0PhOXeyorW3TdkDutOT9p9eiwujIzwbmOH98xqMzqd/yXl77F3h0IZcs/E6hh9Ch8nG85//8LznvMxc667jofJwr5ILzCIDOJl+fdWpPOTUv/QKt4/1n52VuugbzSGfyXsO7ee3fJa0AAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABSzz08upArjrwyhmUObPP2K+tWpOo+IB883lyN/bozJsTwmvI9rn9pTX1BrnvY3Lhaq1DHQf7l+lyhY3S256TzCPiwzlan2Pfs7bNOez4LVM67PXqMGd3q7bY/v8z9PyCHWvL+02tRYXTkZwNznD8+49
[… remainder of base64-encoded Matplotlib 3.7.2 PNG omitted …]\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAl9UlEQVR4nO3de3RV5Z0//k8gEEJIjpAYAnJzitzECpWZqnTVajtKRwutq/WCIkjrZU3VTlupdrqsOi67rFOko1ZbrTq9UGnroF+trbd6RSq2CK1WRlFBRblowBAEApL9+4OfZ4xccjsnJzu8XmtlLbL3s5/97PPs8znn8M7ZuyhJkiQAAAAAAABSrFuhBwAAAAAAANBeAg8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2Bx27MmDEjioqKdvl56aWX9rhu0qRJ2e2HDRu22zZXXXVVts1rr70Wn/vc56KsrCyqqqriggsuiG3btmXXb926NWbMmBGHHHJIFBcXx+c///ldxrlgwYKYOHFiVFZWRmlpaYwaNSrmzJmT18cm7dIytxERDQ0N8Z3vfCeGDh0aJSUl8ZGPfCRuvfXWvD02aff+/J177rm7rPvXf/3XKCoqihkzZjRp21nm+n1PPvlkFBcXx7hx49r1WHR1uZ7riIiFCxfGv/zLv0Tfvn2jV69eccghh8Ts2bNjx44dTdpt2LAhpk2bFplMJjKZTEybNi3eeeedJm2+9rWvxWGHHRYlJSW7ncuVK1fudkz33Xdfux4XAAAAgH1dcSF2uuHdbc03yqG+ZT1bvc2kSZPitttua7Js//333+O6kpKSJr//x3/8R5x11llNlpWXl0dExI4dO+L444+P/fffPxYsWBC1tbUxffr0SJIkrrvuumyb0tLSuOCCC+J//ud/djvGsrKyOO+88+KjH/1olJWVxYIFC+Kcc86JsrKyOPvss1t9zLlQ11DXYfvKlGTatF0a5jYi4qSTToq1a9fGLbfcEsOHD49169bFe++916Zjbq8tdVs7dH+lmV5t2m7w4MExb968mDNnTpSWlkbEztDh9ttvjyFDhjRp25nmOiKirq4uzjjjjPj0pz8da9eubf3B58iO2toO3V/3yso2bZfLub7zzjvjpJNOijPPPDMeeeSR2G+//eKhhx6Kb33rW/HUU0/Fb37zmygqKoqIiKlTp8aqVauy4cTZZ58d06ZNi3vuuSfbX5IkMXPmzFi0aFH87W9/2+MxPPTQQ3HwwQdnf+/Xr1+bHgsAAAAAdipI4PHZqx/p0P09dflxrd6mpKQkampqWr3ufeXl5Xts88ADD8Tzzz8fr7/+egwcODAiImbPnh0zZsyIK6+8MioqKqKsrCxuvPHGiNj5V98f/gviiIjx48fH+PHjs78PGzYs5s+fH0888UTBAo9pf5jaYfu6+/P3tmm7NMztfffdF4899li88sor2f8EHTZsWAuPMPd+fsYdHbq/c/7f6W3a7mMf+1i88sorMX/+/DjttNMiImL+/PkxePDg+Id/+IcmbTvLXL/vnHPOialTp0b37t3jrrvuauER596aj47r0P0d8MbrbdouV3P97rvvxllnnRWTJ0+Om266Kbv8K1/5SvTv3z8mT54cv/nNb+Lkk0+OZcuWxX333RdPPfVUfPzjH4+IiJtvvjmOOOKIeOGFF2LkyJEREXHttddGRMRbb72118CjsrKy2XMQAAAAgJZzSasC+NOf/hRjx47N/idpRMRxxx0XDQ0NsXjx4jb3u2TJkli4cGEcddRRuRgmbZCrub377rtjwoQJcfXVV8cBBxwQI0aMiAsvvDC2bNmSj2F3KWeeeWaTv+a/9dZbY+bMmTnfTy6fx7fddlu8/PLLcemll+Z6mF1aLub6gQceiNra2rjwwgt3Wfe5z30uRowYEbfffntE7JzzTCaTDTsiIg4//PDIZDKxcOHCVo9/8uTJUV1dHRMnTow77ujYUBEAAACgKxJ47MHvfve76NOnT/bnS1/60h7X9enTJ6644oom21900UW7tHn00UcjImLNmjXRv3//Ju379u0bPXv2jDVr1rR6rIMGDYqSkpKYMGFCfPWrX42vfOUrrT/gfUga5vaVV16JBQsWxHPPPRd33nln/PCHP4w77rgjvvrVr7b9wPcR06ZNiwULFsTKlSvj1VdfjSeffDJOP33Xb4x0lrlevnx5XHzxxTF37twoLi7Il+5SKxdz/eKLL0ZExOjRo3e7j1GjRmXbrFmzJqqrq3dpU11d3ao579OnT1xzzTVxxx13xO9///v49Kc/HSeffHL88pe/bHEfAAAAAOzK/67twdFHH529FE3Ezvtl7GldxK7XXp81a1b2prnvO+CAA7L/fv968B+UJMlulzfniSeeiE2bNsVTTz0VF198cQwfPjxOPfXUVvezr0jD3DY2NkZRUVHMnTs3Mpmd9yq55ppr4otf/GL86Ec/yt6zgF1VVVXF8ccfHz/72c8iSZI4/vjjo6qqapd2nWGud+zYEVOnTo3LL788RowY0aJt+D+5nOskSXa7jw/PZy6e31VVVfH1r389+/uECRNiw4YNcfXVV+82sAEAAACgZQoSePzhW0cXYretUlZWFsOHD2/1uvdVVVXtsU1NTU0sWrSoybINGzbE9u3bd/mL8ZY48MADIyLikEMOibVr18Zll11WsMDjF5/9VUH22xppmNsBAwbEAQcckA07Inb+BXqSJLFq1ao46KCDWtxXLpzx8y926P7aa+bMmXHeeedFRMSPfvSj3bbpDHNdX18ff/nLX2LJkiXZ8TY2NkaSJFFcXBwPPPBAHHPMMS3qK1dq/ra0Q/fXXu2d6/eDpmXLlsWRRx65y/r//d//jTFjxkTEzjnf3Q3l33rrrTbV7g86/PDD46c//Wm7+gAAAADY1xUk8Ohb1rMQu+00jjjiiLjyyitj9erVMWDAgIjYeR35kpKSOOyww9rVd5Ik0dDQkIthtkmmJNN8oy4sV3M7ceLE+O1vfxubNm2KPn36RMTOS+9069YtBg0alJex701ppleH77M9Jk2aFNu2bYuInffVyIdczHVFRUU8++yzTZbdcMMN8fDDD8cdd9yRDTM7UvfKyg7fZ3u0d66PPfbY6NevX8yePXuXwOPuu++O5cuXZy9/dcQRR0RdXV08/fTT8U//9E8REbFo0aKoq6vbbVjSGkuWLMmeRwAAAAC0jUtatUFDQ8Mu12svLi5ucimV+vr6Xdr07t07Kioq4thjj40xY8bEtGnT4j//8z9j/fr1ceGFF8ZZZ50VFRUV2fbPP/98bNu2LdavXx/19fWxdOnSiIgYN25cROz8a+YhQ4bEqFGjIiJiwYIF8YMf/CDOP//8PBz1vqGzzO3UqVPjiiuuiDPPPDMuv/z
yePvtt2PWrFkxc+ZMl7Nqge7du8eyZcuy/96dzjDX3bp1i7Fjxzbpv7q6Onr16rXLcnavvXNdVlYWP/nJT+KUU06Js88+O84777yoqKiIP/7xjzFr1qz44he/GCeddFJE7PyW1aRJk+Kss86Kn/zkJxERcfbZZ8cJJ5wQI0eOzPb90ksvxaZNm2LNmjWxZcuW7JyPGTMmevbsGT/72c+iR48eMX78+OjWrVvcc889ce2118b3v//9XD88AAAAAPuWhF1Mnz49mTJlyh7XRcQuPyNHjsy2GTp06G7bnHPOOdk2r776anL88ccnpaWlSb9+/ZLzzjsv2bp1a5N97amf91177bXJwQcfnPTu3TupqKhIxo8fn9xwww3Jjh07cvuAdCFpmdskSZJly5Yln/nMZ5LS0tJk0KBByTe+8Y1k8+bNuXswupi9zW2SJMmUKVOS6dOnZ9t2prn+oEsvvTQ59NBD2/QY7CtyPddJkiSPP/54MmnSpCSTySQ9e/ZMxowZk/zgBz9I3nvvvSbtamtrk9NOOy0pLy9PysvLk9NOOy3ZsGFDkzZHHXXUbve5YsWKJEmS5L//+7+T0aNHJ717907Ky8uTww47LPnFL37R3ocFAAAAYJ9XlCR7uFMrAAAAAABASnQr9AAAAAAAAADaS+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6hXnq+PGxsZ48803o7y8PIqKivK1GwAAAAAAIAWSJIn6+voYOHBgdOuW++9j5C3wePPNN2Pw4MH56h4AAAAAAEih119/PQYNGpTzfvMWeJSXl0fEzoFXVFTkazcAAAAAAEAKbNy4MQYPHpzND3Itb4HH+5exqqioEHgAAAAAAAAREXm7DYablgMAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEi9vAce767fnO9dQJe0avUb8YPfzo5Vq98o9FDoYtZvXR+/WjY31m9dHxE76/Rfbv9rTut1PvpMsx1r18bG2dfEjrVrCz2UnOkKx5Sr8zRN5/uHn//tbQe5lo9z74N9vl3fEDc/8lK8Xd+Qs/7Zt3XkOaU2A+RPV6mxXeU4IrrWsdCx8h54bHlna753AV3Smto18XiPh2NN7ZpCD4UuZsPW9THvhV/Fhv//TcPmDVti8bxnY/OGLTnbRz76TLMd69ZF/TVzYse6dYUeSs50hWPK1XmapvP9w8//9raDXMvHuffBPt+ub4hbHn1Z4EHOdOQ5pTYD5E9XqbFd5Tgiutax0LFc0goAAAAAAEg9gQcAAAAAAJB6xfneQcO722JLnctaQWtt27w9IiLefe/dqGuoK/Bo6Eo2bdu02+UNm3JXrxs2bctJP11N4zt1saO2ttDDyInGd7pOXWrvuZ/G833Ttk17fW3ZU52AjtLcOdravj6sfsv22PBu+p67dD71W7Z3+D5z+fwAYKeu9v63K7xWdLU5oePkPfC4/8rHorRHab53A11OXeX6iMkRs1d+P2JloUfDvuDe7/6x0EPo8mpPObXQQ2A39sVz/5KF3yn0EGCv8n2Onv/zv+S1f8gnNRyA5nitYF/mklYAAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpl/d7eBz3naNi2Ngh+d4NdDlLXlgaC968P7457KIYN/rQQg+HLmRl3YrdXs/z+P/4dFQO65uTfdSu3LBP3hehOZXzbo8eY0YXehg5sf35ZV3mniTtPffTeL5fceSVMSxz4B7X76lOQEdp7hxtjd2dz9edMSGG15TnpH/2bS+tqe/we8Lk8vkBwE5d7f1vV3it6GpzQsfJe+BRUtYzSjO98r0b6HJ69u4RERFlxWWRKckUeDR0JX169tnt8pI+uavXJX165qSfrqbbfpnoXllZ6GHkxI79uk5dau+5n8bzvU/PPnt9bdlTnYCO0tw52tq+Pqy8tEf0LUvfc5fOp7y0R4fvM5fPDwB26mrvf7vCa0VXmxM6jktaAQAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASL28Bx6l+7lhObRFTWVNfHL7MVFTWVPoodDF9O3VL04ZOTX69uoXERG9+5bGYaccEr37luZsH/noM826V1dH+Te+Ht2rqws9lJzpCseUq/M0Tef7h5//7W0HuZaPc++DfVaVl8SXP/WRqCovyVn/7Ns68pxSmwHyp6vU2K5yHBFd61joWEVJkiT56Hjjxo2RyWSirq4uKioq8rELAAAAAAAgJfKdG7ikFQAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfw6ALWb10fv1o2N9ZvXd8p+sm33Y2zvWN/u74hbn7kpXi7viEn7XIlX/vr6OMg/8xpunWW+ess4wA6H/WBQmnJ+/y2fBbYsXZtbJx9TexYu7ZN42rv9sBOrXl98VrUsQrxeJvj3POY0hnV5vl8FHh0ARu2ro95L/wqNrQzqMhVP/m2u3G2d+xv1zfELY++3KLAoyXtciVf++vo4yD/zGm6dZb56yzjADof9YFCacn7/LZ8Ftixbl3UXzMndqxb16ZxtXd7YKfWvL54LepYhXi8zXHueUzpjGo3CTwAAAAAAAD2SuABAAAAAACknsADAAAAAABIveJCD4Dc2bRtU9Q11LVr+zT54PHmauz1W7bHhne37XV9ITQ3rrb0R9eU63OFjtHZnpPOI+DDOludYt+zt8867fks0PhOXeyorW3TdkDutOT9p9eiwujIzwbmOH98xqMzqd/yXl77F3h0IZcs/E6hh9Ch8nG85//8LznvMxc667jofJwr5ILzCIDOJl+fdWpPOTUv/QKt4/1n52VuugbzSGfyXsO7ee3fJa0AAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABSzz08upArjrwyhmUObPP2K+tWpOo+IB883lyN/bozJsTwmvI9rn9pTX1BrnvY3Lhaq1DHQf7l+lyhY3S256TzCPiwzlan2Pfs7bNOez4LVM67PXqMGd3q7bY/v8z9PyCHWvL+02tRYXTkZwNznD8+49
GZLF3+Zhzz/fz1L/DoQvr07BOZkky7tk+TDx5vrsZeXtoj+pb13Ov6QmhuXG3pj64p1+cKHaOzPSedR8CHdbY6xb5nb5912vNZoNt+meheWdnq7Xbs1/bPXcCuWvL+02tRYXTkZwNznD8+49GZlJfmN5JwSSsAAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3h0AX179YtTRk6Nvr36dYp+8m1342zv2KvKS+LLn/pIVJWX5KRdruRrfx19HOSfOU23zjJ/nWUcQOejPlAoLXmf35bPAt2rq6P8G1+P7tXVbRpXe7cHdmrN64vXoo5ViMfbHOeex5TOqLJPfs/HoiRJknx0vHHjxshkMlFXVxcVFRX52AUAAAAAAJAS+c4NfMMDAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/DYh7xd3xA3P/JSvF3f0Kb1AEDE+q3r41fL5sb6retbtBwAAADoGAKPfcjb9Q1xy6Mv7zXw2Nt6ACBiw9b1Me+FX8WGDwUbe1oOAAAAdAyBBwAAAAAAkHoCDwAAAAAAIPWKCz0AOl79lu2x4d1tu10OALTMpm2boq6hrsnvAAAAQOEIPPZB5//8L4UeAgCk3iULv1PoIQAAAAAf4JJWAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqeceHvug686YEMNryndZ/tKaevf3AIAWuuLIK2NY5sDs7yvrVrivBwAAABSQwGMfVF7aI/qW9dztcgCgZfr07BOZkkyT3wEAAIDCcUkrAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfw2IdUlZfElz/1kagqL2nTegAgom+vfnHKyKnRt1e/Fi0HAAAAOkZRkiRJPjreuHFjZDKZqKuri4qKinzsAgAAAAAASIl85wa+4QEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1ivPVcZIkERGxcePGfO0CAAAAAABIiffzgvfzg1zLW+BRW1sbERGDBw/O1y4AAAAAAICUqa2tjUwmk/N+8xZ49OvXLyIiXnvttbwMHEiHjRs3xuDBg+P111+PioqKQg8HKAB1AFAHgAi1AFAHgIi6uroYMmRINj/ItbwFHt267bw9SCaTUcCAqKioUAtgH6cOAOoAEKEWAOoA8H/5Qc77zUuvAAAAAAAAHUjgAQAAAAAApF7eAo+SkpK49NJLo6SkJF+7AFJALQDUAUAdACLUAkAdAPJfB4qSJEny0jMAAAAAAEAHcUkrAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqdfqwOONN96I008/PSorK6N3794xbty4WLx4cXZ9kiRx2WWXxcCBA6O0tDQ+9alPxd///vcmfTQ0NMT5558fVVVVUVZWFpMnT45Vq1a1/2iADtFcHZg/f34cd9xxUVVVFUVFRbF06dJd+lAHIP32Vgu2b98eF110URxyyCFRVlYWAwcOjDPOOCPefPPNJn2oBZBuzb0nuOyyy2LUqFFRVlYWffv2jc985jOxaNGiJn2oA5BuzdWBDzrnnHOiqKgofvjDHzZZrg5A+jVXC2bMmBFFRUVNfg4//PAmfagFkG4teU+wbNmymDx5cmQymSgvL4/DDz88Xnvttez6XNSBVgUeGzZsiIkTJ0aPHj3iD3/4Qzz//PMxe/bs2G+//bJtrr766rjmmmvi+uuvjz//+c9RU1MT//zP/xz19fXZNv/2b/8Wd955Z8ybNy8WLFgQmzZtihNOOCF27NjRqsEDHa8ldeDdd9+NiRMnxlVXXbXHftQBSLfmasHmzZvjmWeeiUsuuSSeeeaZmD9/frz44osxefLkJv2oBZBeLXlPMGLEiLj++uvj2WefjQULFsSwYcPi2GOPjbfeeivbRh2A9GpJHXjfXXfdFYsWLYqBAwfusk4dgHRraS2YNGlSrF69Ovvz+9//vsl6tQDSqyV14OWXX45PfOITMWrUqHj00Ufjr3/9a1xyySXRq1evbJuc1IGkFS666KLkE5/4xB7XNzY2JjU1NclVV12VXbZ169Ykk8kkP/7xj5MkSZJ33nkn6dGjRzJv3rxsmzfeeCPp1q1bct9997VmOEABNFcHPmjFihVJRCRLlixpslwdgPRrTS1439NPP51ERPLqq68mSaIWQNq1pQ7U1dUlEZE89NBDSZKoA5B2La0Dq1atSg444IDkueeeS4YOHZrMmTMnu04dgPRrSS2YPn16MmXKlD2uVwsg3VpSB04++eTk9NNP3+P6XNWBVn3D4+67744JEybEl770paiuro7x48fHzTffnF2/YsWKWLNmTRx77LHZZSUlJXHUUUfFwoULIyJi8eLFsX379iZtBg4cGGPHjs22ATqv5upAS6gDkH5tqQV1dXVRVFSU/QsPtQDSrbV1YNu2bXHTTTdFJpOJQw89NCLUAUi7ltSBxsbGmDZtWsyaNSsOPvjgXfpQByD9Wvqe4NFHH43q6uoYMWJEnHXWWbFu3brsOrUA0q25OtDY2Bj33ntvjBgxIo477riorq6Oj3/843HXXXdl2+SqDrQq8HjllVfixhtvjIMOOijuv//+OPfcc+OCCy6In//85xERsWbNmoiI6N+/f5Pt+vfvn123Zs2a6NmzZ/Tt23ePbYDOq7k60BLqAKRfa2vB1q1b4+KLL46pU6dGRUVFRKgFkHYtrQO/+93vok+fPtGrV6+YM2dOPPjgg1FVVRUR6gCkXUvqwPe///0oLi6OCy64YLd9qAOQfi2pBZ/97Gdj7ty58fDDD8fs2bPjz3/+cxxzzDHR0NAQEWoBpF1zdWDdunWxadOmuOqqq2LSpEnxwAMPxBe+8IU48cQT47HHHouI3NWB4tYMvLGxMSZMmBDf+973IiJi/Pjx8fe//z1uvPHGOOOMM7LtioqKmmyXJMkuyz6sJW2AwmtpHWgLdQDSozW1YPv27XHKKadEY2Nj3HDDDc32rRZAOrS0Dhx99NGxdOnSePvtt+Pmm2+Ok046KRYtWhTV1dV77Fsdg
HRorg4sXrw4/uu//iueeeaZVj+n1QFIj5a8Jzj55JOz7ceOHRsTJkyIoUOHxr333hsnnnjiHvtWCyAdmqsDjY2NERExZcqU+PrXvx4REePGjYuFCxfGj3/84zjqqKP22Hdr60CrvuExYMCAGDNmTJNlo0ePzt5JvaamJiJil8Rl3bp12W991NTUxLZt22LDhg17bAN0Xs3VgZZQByD9WloLtm/fHieddFKsWLEiHnzwwey3OyLUAki7ltaBsrKyGD58eBx++OFxyy23RHFxcdxyyy0RoQ5A2jVXB5544olYt25dDBkyJIqLi6O4uDheffXV+OY3vxnDhg2LCHUAuoK2/D/BgAEDYujQobF8+fKIUAsg7ZqrA1VVVVFcXNxstpCLOtCqwGPixInxwgsvNFn24osvxtChQyMi4sADD4yampp48MEHs+u3bdsWjz32WBx55JEREXHYYYdFjx49mrRZvXp1PPfcc9k2QOfVXB1oCXUA0q8lteD9sGP58uXx0EMPRWVlZZP2agGkW1vfEyRJkr18hToA6dZcHZg2bVr87W9/i6VLl2Z/Bg4cGLNmzYr7778/ItQB6Ara8p6gtrY2Xn/99RgwYEBEqAWQds3VgZ49e8Y//uM/7rVNzupAi29vniTJ008/nRQXFydXXnllsnz58mTu3LlJ7969k1/+8pfZNldddVWSyWSS+fPnJ88++2xy6qmnJgMGDEg2btyYbXPuuecmgwYNSh566KHkmWeeSY455pjk0EMPTd57773WDAcogJbUgdra2mTJkiXJvffem0REMm/evGTJkiXJ6tWrs23UAUi35mrB9u3bk8mTJyeDBg1Kli5dmqxevTr709DQkO1HLYD0aq4ObNq0Kfn2t7+d/OlPf0pWrlyZLF68OPnyl7+clJSUJM8991y2H3UA0qslnw0+bOjQocmcOXOaLFMHIN2aqwX19fXJN7/5zWThwoXJihUrkkceeSQ54ogjkgMOOMD/F0IX0ZL3BPPnz0969OiR3HTTTcny5cuT6667LunevXvyxBNPZNvkog60KvBIkiS55557krFjxyYlJSXJqFGjkptuuqnJ+sbGxuTSSy9NampqkpKSkuSTn/xk8uyzzzZps2XLluS8885L+vXrl5SWliYnnHBC8tprr7V2KECBNFcHbrvttiQidvm59NJLs23UAUi/vdWCFStW7LYORETyyCOPZNupBZBue6sDW7ZsSb7whS8kAwcOTHr27JkMGDAgmTx5cvL000836UMdgHRr7rPBh+0u8FAHIP32Vgs2b96cHHvsscn++++f9OjRIxkyZEgyffr0XZ7nagGkW0veE9xyyy3J8OHDk169eiWHHnpoctdddzVZn4s6UJQkSdKKb6cAAAAAAAB0Oq26hwcAAAAAAEBnJPAAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AACAdrvsssti3LhxhR4GAACwDytKkiQp9CAAAIDOq6ioaK/rp0+fHtdff300NDREZWVlB40KAACgKYEHAACwV2vWrMn++9e//nV897vfjRdeeCG7rLS0NDKZTCGGBgAAkOWSVgAAwF7V1NRkfzKZTBQVFe2y7MOXtJoxY0Z8/vOfj+9973vRv3//2G+//eLyyy+P9957L2bNmhX9+vWLQYMGxa233tpkX2+88UacfPLJ0bdv36isrIwpU6bEypUrO/aAAQCAVBJ4AAAAefHwww/Hm2++GY8//nhcc801cdlll8UJJ5wQffv2jUWLFsW5554b5557brz++usREbF58+Y4+uijo0+fPvH444/HggULok+fPjFp0qTYtm1bgY8GAADo7AQeAABAXvTr1y+uvfbaGDlyZMycOTNGjhwZmzdvjn//93+Pgw46KL797W9Hz54948knn4yIiHnz5kW3bt3ipz/9aRxyyCExevTouO222+K1116LRx99tLAHAwAAdHrFhR4AAADQNR188MHRrdv//Y1V//79Y+zYsdnfu3fvHpWVlbFu3bqIiFi8eHG89NJLUV5e3qSfrVu3xssvv9wxgwYAAFJL4AEAAORFjx49mvxeVFS022WNjY0REdHY2BiHHXZYzJ07d5e+9t9///wNFAAA6BIEHgAAQKfwsY99LH79619HdXV1VFRUFHo4AABAyriHBwAA0CmcdtppUVVVFVOmTIknnngiVqxYEY899lh87Wtfi1WrVhV6eAAAQCcn8AAAADqF3r17x+OPPx5DhgyJE088MUaPHh0zZ86MLVu2+MYHAADQrKIkSZJCDwIAAAAAAKA9fMMDAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOr9fw+gShyFf/1LAAAAAElFTkSuQmCC",
"text/plain": [
""
]
@@ -458,7 +458,7 @@
"outputs": [
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAkuklEQVR4nO3de3TU5Z0/8M9wSyAkIYDc5KJbFUSqFvAoxZ9WrXipFS+ttIu31XWlq1a2PRyta4W2btXusbS7FisWQX91xdOfYt3aWrHKrbpquVSsXbQWBRWKUu53zPf3h3U0JpCJZpI84fU6Z05gvs88zzPkw2dm8p7MN5dlWRYAAAAAAAAJa9PcGwAAAAAAAPi4BB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB57sGbNmrj88sujf//+UVJSEr169YpTTjklnn766YiIOOCAAyKXy0Uul4tOnTrFkCFD4o477sjffsaMGfnjH7yUlpbWWuupp56Ktm3bxqmnnlrr2Kuvvhq5XC6WLFmSv27Tpk3xmc98JgYNGhQrV66MiKhzrVwuFzNnzoyIiDlz5tS4vlu3bnHiiSfGb3/72wb9uzzwwAMxePDgKCkpicGDB8esWbNqjZkyZUoceOCBUVpaGsOGDYv58+c3aI19iTqrW311Nm/evPj85z8fffr0iVwuFw899FCD5t8XqbW61VdrN910Uxx11FFRXl4ePXr0iLPOOiuWLVvWoDX2JeqsbvXV2e233x6HH354VFRUREVFRYwYMSJ+9atfNWiNfY1aq1shz9Pec9NNN0Uul4vx48c3aA0AAICWrF2zrLrlraZdr2y/Bt/k3HPPjV27dsXdd98df/d3fxd/+ctf4je/+U389a9/zY/59re/HZdddlls3rw5ZsyYEePGjYsuXbrEmDFjIiKioqKi1g/GcrlcrbXuuuuuuOqqq+InP/lJrFixIvr377/Hfb311ltx2mmnRUTEggULonv37vlj06dPr/VivEuXLjX+vmzZsqioqIi33norbrzxxvjc5z4XL730UvTo0aPef5Onn346xowZE9/5znfi7LPPjlmzZsV5550XCxYsiKOPPjoiIu6///4YP358TJkyJUaOHBl33HFHnHbaafHiiy/u9X4Vw/aNf61/UCMqreja4Nuos9oKqbMtW7bEEUccEf/wD/8Q5557br1zFt+6Jl6vqsG3UGu1FVJrc+fOjSuuuCKOOuqo2L17d/zrv/5rjBo1Kl588cUoKyurd43GtG3btiZdr2PHjg2+jTqrrZA669u3b9x8881x0EEHRUTE3XffHaNHj47FixfHYYcdVu8ajS3bsrNJ18uVdWjwbdRabYXU2nuee+65mDp1ahx++OH1zgsAAJCSXJZlWZOvOqn2i8nirtewu7h+/fqoqqqKOXPmxPHHH1/nmAMOOCDGjx9f411xhxxySAwbNizuu+++mDFjRowfPz7Wr1+/17W2bNkSvXv3jueeey4mTpwYgwcPjhtuuCF//NVXX40DDzwwFi9eHN26dYuTTz45evfuHQ8//HCUl5fnx+VyuZg1a1acddZZda4zZ86cOOGEE2LdunX5F9dLly6Nww8/PB5++OH4/Oc/X++/y5gxY2Ljxo013nV66qmnRlVVVdx3330REXH00UfH0KFD4/bbb8+POfTQQ+Oss86Km266qd41GtN9Yw5t0vW+fP8fGzRendWtkDr7oPr21DSGN/F6v2vQaLVWt4bWWsS7P8zs0aNHzJ07N4477rh612hMU6dObdL1/umf/qlB49VZ3T5KnUVEdO3aNf793/89Lr300nrXaGzbvz2nSdcrveEzDRqv1upWaK1t3rw5hg4dGlOmTIkbb7wxjjzyyPjBD35Q7/wAAAAp8JFWdejcuXN07tw5HnroodixY0fBtystLY1du3Y1aK37778/Bg4cGAMHDozzzz8/pk+fHnVlUMuWLYuRI0fGoEGD4tFHH63xIvqj2Lp1a0yfPj0iItq3b1/QbZ5++ukYNWpUjetOOeWUeOqppyIiYufOnbFw4cJaY0aNGpUfw/vUWd3qqzMaTq3V7aPU2oYNGyLi3R9GU5M6q1tD6+ydd96JmTNnxpYtW2LEiBEfa7+tlVqrW6G1dsUVV8TnPve5+OxnP/ux9ggAANASCTzq0K5du5gxY0bcfffd0aVLlxg5cmRcd9118fzzz9c5fvfu3TFjxoxYunRpnHTSSfnrN2zYkH9R/t7lwy9Ep02bFueff35EvPsuvM2bN8dvfvObWmtceOGF8YlPfCIeeOCBKCkpqXMfX/7yl2ut9+c//7nGmL59++aPTZ48OYYNG1Zjz3uzevXq6NmzZ43revbsGatXr46IiLfffjveeeedvY7hfeqsbvXVGQ2n1urW0FrLsiy+9rWvxbHHHhtDhgwpaI19iTqrW6F1tnTp0ujcuXOUlJTEuHHjYtasWTF48OCC1tjXqLW6FVJrM2fOjEWLFjX5b90CAAA0FYHHHpx77rnx5ptvxsMPPxynnHJKzJkzJ4YOHRozZszIj7nmmmuic+fO0bFjx7jiiitiwoQJcfnll+ePl5eXx5IlS2pc3nu3XsS77wZ89tln40tf+lJEvPsCfsyYMXHXXXfV2s/o0aNjwYIF8cADD+xxz5MnT661Xr9+/WqMmT9/fixatCjuu+++GDBgQMyYMaPgdw5G1P5s6yzLal1XyBjepc7qpoYan1qrW0Nq7corr4znn39+rx9DtK9TZ3UrpM4GDhwYS5Ysif/5n/+Jr3zlK3HRRRfFiy++WPAa+xq1Vre91drKlSvj6quvjp/+9Kd1npwdAACgNWiek5ZPWNMsyzZUaWlpnHzyyXHyySfHDTfcEP/4j/8YEydOjIsvvjgiIiZMmBAXX3xxdOrUKXr37l3rRWabNm3yJyCty7Rp02L37t2x//7756/Lsizat28f69ati6qq909MfN1118Xhhx8eY8eOjSzL8ifc/KBevXrtdb2IiAMPPDC6dOkShxxySGzfvj3OPvvseOGFF/b4bsQPz//hd6SuWbMm/27C7t27R9u2bfc6pimdfedvm3zNj0Kd1Z6/pdRQ4WY39wYKotZqz19orV111VXx8MMPx7x586Jv3771zl0MF1xwQbOs21DqrPb8hdRZhw4d8vsYPnx4PPfcc/HDH/4w7rjjjnrXaGwlX/90k6/5Uai12vPvrdYWLlwYa9asiWHDhuWPv/POOzFv3ry47bbbYseOHdG2bdt61wEAAGjJmuc3PMr2a9pLIxk8eHBs2bIl//fu3bvHQQcdFH369Gnwu893794d99xzT9x666013un3+9//PgYMGBD33ntvrdtcf/318Z3vfCfGjh3bKO8wvuCCC6K6ujqmTJlS0PgRI0bE7Nk
1f7D72GOPxac//e4PRjp06BDDhg2rNWb27Nn5MU2ptKJrk14aizrbe521TFVNfGkcaq3+WsuyLK688sp48MEH44knnogDDzzwY+/zo+rYsWOTXhqLOvtoPS3Lsgadn6Ix5co6NOmlsai1vdfaSSedFEuXLq1xf4YPHx5jx46NJUuWCDsAAIBWoXl+w6OFW7t2bXzxi1+MSy65JA4//PAoLy+P3/3ud/G9730vRo8eXfA8WZbV+VnwPXr0iF/84hexbt26uPTSS6OysrLG8S984Qsxbdq0uPLKK2vd9tprr422bdvmXwSPHTs2f2z9+vW11isvL4+ysrI699emTZsYP3583HjjjXH55ZdHp06d9np/rr766jjuuOPilltuidGjR8fPf/7zePzxx2PBggX5MV/72tfiggsuiOHDh8eIESNi6tSpsWLFihg3btxe594XqbO6FVJnmzdvjj/96U/5vy9fvjyWLFkSXbt2jf79++91/n2RWqtbIbV2xRVXxH/913/Fz3/+8ygvL8/vp7KyslFDgdZAndWtkDq77rrr4rTTTot+/frFpk2bYubMmTFnzpx49NFH9zr3vkqt1a2+WisvL691/qGysrLo1q2b8xIBAACtR0Yt27dvz6699tps6NChWWVlZdapU6ds4MCB2fXXX59t3bo1y7IsGzBgQDZ58uQ9zjF9+vQsIuq8rFq1KjvjjDOy008/vc7bLly4MIuIbOHChdny5cuziMgWL15cY8ytt96atW3bNrvnnnuyLMv2uNZNN92UZVmWPfnkk1lEZOvWrasxz+bNm7OqqqrslltuKejf5mc/+1k2cODArH379tmgQYOyBx54oNaYH/3oR9mAAQOyDh06ZEOHDs3mzp1b0Nz7GnW2Z/XV2XvrfPhy0UUXFTT/vkat7Vl9tbanfUyfPr2g+fcl6mzP6quzSy65JP+4ud9++2UnnXRS9thjjxU0975Ire1ZIc/TPuj444/Prr766oLmBgAASEEuy7KsIQEJAAAAAABAS9M85/AAAAAAAABoRAIP8jp37rzHy/z585t7e7QS6oymotZoCuqMpqLWAAAA6ucjrcj74EmgP2z//fd3gl4ahTqjqag1moI6o6moNQAAgPoJPAAAAAAAgOT5SCsAAAAAACB5Ag8AAAAAACB57Yo1cXV1dbz55ptRXl4euVyuWMsAAAAAAAAJyLIsNm3aFH369Ik2bRr/9zGKFni8+eab0a9fv2JNDwAAAAAAJGjlypXRt2/fRp+3aIFHeXl5RLy78YqKimItAwAAAAAAJGDjxo3Rr1+/fH7Q2IoWeLz3MVYVFRUCDwAAAAAAICKiaKfBcNJyAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgecUPPDatLvoS0CQ2rYp4ctK7X4vu7Yi4429foaVQl7Q279d0tmlVrH/kZ/HM//1FLL97bmxd/lbsmrM8sk07at1q69at8bvf/S62bt1a48+pKGTPKd4vWotiP9Z4LEtTId+31vy9bc33DaClS6kHp7HXbev+N5b+7OLYtu5/m3srNJNt694q6vzFDzw2CzxoJTatipj7rSYMPO6Mlv4gxb5GXdLavF/T2ea3Y8vvy2PlxrXR+7UsdqzaEO/Mey2yzTtr3Wrr1q2xaNGifODx3p9TUcieU7xftBbFfqzxWJamQr5vrfl725rvG0BLl1IPTmOv29a9Ei/8v2di27pXmnsrNJNt64tboz7SCgAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASF67oq+wfX3EluKeiASaxPZ1zbDoxohojnWhLhubewNQJBsjorrmVTveqfdW27dvL852msj27dtj27ZtezwGzatYz4E8lqVtb3WxL3xvvTYAaHopPr609MeLLRERsXPL5ti+8a/NvBeaw66tm4o6f/EDj5mjI0qKvgq0Uv/c3BsA2Af8c0TsHxFfyV/Tcf6qem/1y1/+snhbagKp75/WznMg6rKv18W+fv8BKExLf7x4981lT944KSImNedGaCZbd9X/BsOPw0daAQAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAySv+OTy+9POIg0YUfRkour88H3HPZ5t40SkRcXATrwl78nK0/M8ChY9iSrx70vL1+Wu2/Z/e9Z7H4/TTT4+IdM+Fcfrpp0e3bt3qPLZ27dpk7xetRbGeA3ksS9ve6mJf+N56bQDQ9FJ8fGnpjxezI2JinHD9pOgy4OTm3gzNYOUfFkXMKt73vviBR2mXiLL9ir4MFF1pVTMsWhERzbEu1KWiuTcARVIREbvig4FHlLSt91alpaXF2lCTKC0tjY4dO+7xGDSvYj0H8liWtr3Vxb7wvfXaAKDppfj40tIfL8oiIqJDWecorejazHuhObTvVF7U+X2kFQAAAAAAkDyBBwAAAAAAkDyBBwAAAAAAkDyBBwAAAAAAkLzin7S8c6+iLwFNorx3xPET3/1adN0j4rK/fYWWQl3S2rxf07nOu6LsiP+Nfn/tFqs656Jn78poe1zbyHXuUOtWnTp1iqFDh0anTp0iImr8OQUf3v9HHQPFUezHGo9laSrk+9aav7et+b4BtHQp9eA09tqx6hMx5AtHR8eqTzT3VmgmHbsUt0ZzWZZlxZh448aNUVlZGRs2bIiKiopiLAEAAAAAACSi2LmBj7QCAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/BI3tsRccffvrbE+YCmsG3dmlj6s9ti27o1zb
0VAFqp1vBY0xruAy2LmgKAj+PtiJj8t4ufRdI4BB7Jezsi7ozGDTwacz6gKWxb91a88P9+FNvWvdXcWwGglWoNjzWt4T7QsqgpAPg43o6Ie/928bNIGofAAwAAAAAASJ7AAwAAAAAASF675t4AjWVjRKxrpHmAVO3csjG2b/xrc28DgFZo55bW8zzR4yWNpTX9vwAAaA0EHq3GPzf3BoAW4MkbL2nuLQBAi+fxEgAAWicfaQUAAAAAACRP4AEAAAAAACRP4AEAAAAAACTPOTxajSkRcXAjzPNyOB8IpOuE6++KLgMGNvc2AGiF1r+2rNWc+8LjJY2lNf2/AABoDQQerUZFRFQ10jxAqjqUVURpRdfm3gYArVCHstbzPNHjJY2lNf2/AABoDXykFQAAAAAAkDyBBwAAAAAAkDyBBwAAAAAAkDyBBwAAAAAAkDwnLU9e94i47G9fW+J8QFPoWLVfDPnCFdGxar/m3goArVRreKxpDfeBlkVNAcDH0T0ixn7gz/Dx5bIsy4ox8caNG6OysjI2bNgQFRUVxVgCAAAAAABIRLFzAx9pBQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJK9dsSbOsiwiIjZu3FisJQAAAAAAgES8lxe8lx80tqIFHmvXro2IiH79+hVrCQAAAAAAIDFr166NysrKRp+3aIFH165dIyJixYoVRdk4kIaNGzdGv379YuXKlVFRUdHc2wGagT4A6ANAhF4A6ANAxIYNG6J///75/KCxFS3waNPm3dODVFZWamBAVFRU6AWwj9MHAH0AiNALAH0AeD8/aPR5izIrAAAAAABAExJ4AAAAAAAAySta4FFSUhITJ06MkpKSYi0BJEAvAPQBQB8AIvQCQB8Ait8HclmWZUWZGQAAAAAAoIn4SCsAAAAAACB5Ag8AAAAAACB5Ag8AAAAAACB5Ag8AAAAAACB5DQ483njjjTj//POjW7du0alTpzjyyCNj4cKF+eNZlsWkSZOiT58+0bFjx/jMZz4Tf/jDH2rMsWPHjrjqqquie/fuUVZWFmeeeWa8/vrrH//eAE2ivj7w4IMPximnnBLdu3ePXC4XS5YsqTWHPgDp21sv2LVrV1xzzTXxyU9+MsrKyqJPnz5x4YUXxptvvlljDr0A0lbfc4JJkybFoEGDoqysLKqqquKzn/1sPPPMMzXm0AcgbfX1gQ+6/PLLI5fLxQ9+8IMa1+sDkL76esHFF18cuVyuxuWYY46pMYdeAGkr5DnBH//4xzjzzDOjsrIyysvL45hjjokVK1bkjzdGH2hQ4LFu3boYOXJktG/fPn71q1/Fiy++GLfeemt06dIlP+Z73/tefP/734/bbrstnnvuuejVq1ecfPLJsWnTpvyY8ePHx6xZs2LmzJmxYMGC2Lx5c5xxxhnxzjvvNGjzQNMrpA9s2bIlRo4cGTfffPMe59EHIG319YKtW7fGokWL4pvf/GYsWrQoHnzwwXjppZfizDPPrDGPXgDpKuQ5wSGHHBK33XZbLF26NBYsWBAHHHBAjBo1Kt566638GH0A0lVIH3jPQw89FM8880z06dOn1jF9ANJWaC849dRTY9WqVfnLL3/5yxrH9QJIVyF94JVXXoljjz02Bg0aFHPmzInf//738c1vfjNKS0vzYxqlD2QNcM0112THHnvsHo9XV1dnvXr1ym6++eb8ddu3b88qKyuzH//4x1mWZdn69euz9u3bZzNnzsyPeeONN7I2bdpkjz76aEO2AzSD+vrABy1fvjyLiGzx4sU1rtcHIH0N6QXvefbZZ7OIyF577bUsy/QCSN1H6QMbNmzIIiJ7/PHHsyzTByB1hfaB119/Pdt///2zF154IRswYEA2efLk/DF9ANJXSC+46KKLstGjR+/xuF4AaSukD4wZMyY7//zz93i8sfpAg37D4+GHH47hw4fHF7/4xejRo0d86lOfijvvvDN/fPny5bF69eoYNWpU/rqSkpI4/vjj46mnnoqIiIULF8auXbtqjOnTp08MGTIkPwZouerrA4XQByB9H6UXbNiwIXK5XP4dHnoBpK2hfWDnzp0xderUqKysjCOOOCIi9AFIXSF9oLq6Oi644IKYMGFCHHbYYbXm0AcgfYU+J5gzZ0706NEjDjnkkLjssstizZo1+WN6AaStvj5QXV0djzzySBxyyCFxyimnRI8ePeLoo4+Ohx56KD+msfpAgwKPP//5z3H77bfHwQcfHL/+9a9j3Lhx8dWvfjXuueeeiIhYvXp1RET07Nmzxu169uyZP7Z69ero0KFDVFVV7XEM0HLV1wcKoQ9A+hraC7Zv3x7XXntt/P3f/31UVFREhF4AqSu0D/ziF7+Izp07R2lpaUyePDlmz54d3bt3jwh9AFJXSB+45ZZbol27dvHVr361zjn0AUhfIb3gtNNOi3vvvTeeeOKJuPXWW+O5556LE088MXbs2BERegGkrr4+sGbNmti8eXPcfPPNceqpp8Zjjz0WZ599dpxzzjkxd+7ciGi8PtCuIRuvrq6O4cOHx3e/+92IiPjUpz4Vf/jDH+L222+PCy+8MD8ul8vVuF2WZbWu+7BCxgDNr9A+8FHoA5COhvSCXbt2xZe+9KWorq6OKVOm1Du3XgBpKLQPnHDCCbFkyZJ4++23484774zzzjsvnnnmmejRo8ce59YHIA319YGFCxfGD3/4w1i0aFGD/0/rA5COQp4TjBkzJj9+yJAhMXz48BgwYEA88sgjcc455+xxbr0A0lBfH6iuro6IiNGjR8e//Mu/RETEkUceGU899VT8+Mc/juOPP36Pcze0DzToNzx69+4dgwcPrnHdoYcemj+Teq9evSIiaiUua9asyf/WR69evWLnzp2xbt26PY4BWq76+kAh9AFIX6G9YNeuXXHeeefF8uXLY/bs2fnf7ojQCyB1hfaBsrKyOOigg+KYY46JadOmRbt27WLatGkRoQ9A6urrA/Pnz481a9ZE//79o127dtGuXbt47bXX4utf/3occMABEaEPQGvwUX5O0Lt37xgwYEC8/PLLEaEXQOrq6wPdu3ePdu3a1ZstNEYfaFDgMXLkyFi2bFmN61566aUYMGBAR
EQceOCB0atXr5g9e3b++M6dO2Pu3Lnx6U9/OiIihg0bFu3bt68xZtWqVfHCCy/kxwAtV319oBD6AKSvkF7wXtjx8ssvx+OPPx7dunWrMV4vgLR91OcEWZblP75CH4C01dcHLrjggnj++edjyZIl+UufPn1iwoQJ8etf/zoi9AFoDT7Kc4K1a9fGypUro3fv3hGhF0Dq6usDHTp0iKOOOmqvYxqtDxR8evMsy5599tmsXbt22b/9279lL7/8cnbvvfdmnTp1yn7605/mx9x8881ZZWVl9uCDD2ZLly7NvvzlL2e9e/fONm7cmB8zbty4rG/fvtnjjz+eLVq0KDvxxBOzI444Itu9e3dDtgM0g0L6wNq1a7PFixdnjzzySBYR2cyZM7PFixdnq1atyo/RByBt9fWCXbt2ZWeeeWbWt2/fbMmSJdmqVavylx07duTn0QsgXfX1gc2bN2ff+MY3sqeffjp79dVXs4ULF2aXXnppVlJSkr3wwgv5efQBSFchrw0+bMCAAdnkyZNrXKcPQNrq6wWbNm3Kvv71r2dPPfVUtnz58uzJJ5/MRowYke2///5+XgitRCHPCR588MGsffv22dSpU7OXX345+8///M+sbdu22fz58/NjGqMPNCjwyLIs++///u9syJAhWUlJSTZo0KBs6tSpNY5XV1dnEydOzHr16pWVlJRkxx13XLZ06dIaY7Zt25ZdeeWVWdeuXbOOHTtmZ5xxRrZixYqGbgVoJvX1genTp2cRUesyceLE/Bh9ANK3t16wfPnyOvtARGRPPvlkfpxeAGnbWx/Ytm1bdvbZZ2d9+vTJOnTokPXu3Ts788wzs2effbbGHPoApK2+1wYfVlfgoQ9A+vbWC7Zu3ZqNGjUq22+//bL27dtn/fv3zy666KJa/8/1AkhbIc8Jpk2blh100EFZaWlpdsQRR2QPPfRQjeON0QdyWZZlDfjtFAAAAAAAgBanQefwAAAAAAAAaIkEHgAAAAAAQPIEHgAAAAAAQPIEHgAAAAAAQPIEHgAAAAAAQPIEHgAAAAAAQPIEHgAAAAAAQPIEHgAAAAAAQPIEHgAAwMc2adKkOPLII5t7GwAAwD4sl2VZ1tybAAAAWq5cLrfX4xdddFHcdtttsWPHjujWrVsT7QoAAKAmgQcAALBXq1evzv/5/vvvjxtuuCGWLVuWv65jx45RWVnZHFsDAADI85FWAADAXvXq1St/qaysjFwuV+u6D3+k1cUXXxxnnXVWfPe7342ePXtGly5d4lvf+lbs3r07JkyYEF27do2+ffvGXXfdVWOtN954I8aMGRNVVVXRrVu3GD16dLz66qtNe4cBAIAkCTwAAICieOKJJ+LNN9+MefPmxfe///2YNGlSnHHGGVFVVRXPPPNMjBs3LsaNGxcrV66MiIitW7fGCSecEJ07d4558+bFggULonPnznHqqafGzp07m/neAAAALZ3AAwAAKIquXbvGf/zHf8TAgQPjkksuiYEDB8bWrVvjuuuui4MPPji+8Y1vRIcOHeK3v/1tRETMnDkz2rRpEz/5yU/ik5/8ZBx66KExffr0WLFiRcyZM6d57wwAANDitWvuDQAAAK3TYYcdFm3avP8eq549e8aQIUPyf2/btm1069Yt1qxZExERCxcujD/96U9RXl5eY57t27fHK6+80jSbBgAAkiXwAAAAiqJ9+/Y1/p7L5eq8rrq6OiIiqqurY9iwYXHvvffWmmu//fYr3kYBAIBWQeABAAC0CEOHDo37778/evToERUVFc29HQAAIDHO4QEAALQIY8eOje7du8fo0aNj/vz5sXz58pg7d25cffXV8frrrzf39gAAgBZO4AEAALQInTp1innz5kX//v3jnHPOiUMPPTQuueSS2LZtm9/4AAAA6pXLsixr7k0AAAAAAAB8HH7DAwAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASN7/B8CasjLyMJvqAAAAAElFTkSuQmCC\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAkuklEQVR4nO3de3TU5Z0/8M9wSyAkIYDc5KJbFUSqFvAoxZ9WrXipFS+ttIu31XWlq1a2PRyta4W2btXusbS7FisWQX91xdOfYt3aWrHKrbpquVSsXbQWBRWKUu53zPf3h3U0JpCJZpI84fU6Z05gvs88zzPkw2dm8p7MN5dlWRYAAAAAAAAJa9PcGwAAAAAAAPi4BB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB4AAAAAAEDyBB57sGbNmrj88sujf//+UVJSEr169YpTTjklnn766YiIOOCAAyKXy0Uul4tOnTrFkCFD4o477sjffsaMGfnjH7yUlpbWWuupp56Ktm3bxqmnnlrr2Kuvvhq5XC6WLFmSv27Tpk3xmc98JgYNGhQrV66MiKhzrVwuFzNnzoyIiDlz5tS4vlu3bnHiiSfGb3/72wb9uzzwwAMxePDgKCkpicGDB8esWbNqjZkyZUoceOCBUVpaGsOGDYv58+c3aI19iTqrW311Nm/evPj85z8fffr0iVwuFw899FCD5t8XqbW61VdrN910Uxx11FFRXl4ePXr0iLPOOiuWLVvWoDX2JeqsbvXV2e233x6HH354VFRUREVFRYwYMSJ+9atfNWiNfY1aq1shz9Pec9NNN0Uul4vx48c3aA0AAICWrF2zrLrlraZdr2y/Bt/k3HPPjV27dsXdd98df/d3fxd/+ctf4je/+U389a9/zY/59re/HZdddlls3rw5ZsyYEePGjYsuXbrEmDFjIiKioqKi1g/GcrlcrbXuuuuuuOqqq+InP/lJrFixIvr377/Hfb311ltx2mmnRUTEggULonv37vlj06dPr/VivEuXLjX+vmzZsqioqIi33norbrzxxvjc5z4XL730UvTo0aPef5Onn346xowZE9/5znfi7LPPjlmzZsV5550XCxYsiKOPPjoiIu6///4YP358TJkyJUaOHBl33HFHnHbaafHiiy/u9X4Vw/aNf61/UCMqreja4Nuos9oKqbMtW7bEEUccEf/wD/8Q5557br1zFt+6Jl6vqsG3UGu1FVJrc+fOjSuuuCKOOuqo2L17d/zrv/5rjBo1Kl588cUoKyurd43GtG3btiZdr2PHjg2+jTqrrZA669u3b9x8881x0EEHRUTE3XffHaNHj47FixfHYYcdVu8ajS3bsrNJ18uVdWjwbdRabYXU2nuee+65mDp1ahx++OH1zgsAAJCSXJZlWZOvOqn2i8nirtewu7h+/fqoqqqKOXPmxPHHH1/nmAMOOCDGjx9f411xhxxySAwbNizuu+++mDFjRowfPz7Wr1+/17W2bNkSvXv3jueeey4mTpwYgwcPjhtuuCF//NVXX40DDzwwFi9eHN26dYuTTz45evfuHQ8//HCUl5fnx+VyuZg1a1acddZZda4zZ86cOOGEE2LdunX5F9dLly6Nww8/PB5++OH4/Oc/X++/y5gxY2Ljxo013nV66qmnRlVVVdx3330REXH00UfH0KFD4/bbb8+POfTQQ+Oss86Km266qd41GtN9Yw5t0vW+fP8fGzRendWtkDr7oPr21DSGN/F6v2vQaLVWt4bWWsS7P8zs0aNHzJ07N4477rh612hMU6dObdL1/umf/qlB49VZ3T5KnUVEdO3aNf793/89Lr300nrXaGzbvz2nSdcrveEzDRqv1upWaK1t3rw5hg4dGlOmTIkbb7wxjjzyyPjBD35Q7/wAAAAp8JFWdejcuXN07tw5HnroodixY0fBtystLY1du3Y1aK37778/Bg4cGAMHDozzzz8/pk+fHnVlUMuWLYuRI0fGoEGD4tFHH63xIvqj2Lp1a0yfPj0iItq3b1/QbZ5++ukYNWpUjetOOeWUeOqppyIiYufOnbFw4cJaY0aNGpUfw/vUWd3qqzMaTq3V7aPU2oYNGyLi3R9GU5M6q1tD6+ydd96JmTNnxpYtW2LEiBEfa7+tlVqrW6G1dsUVV8TnPve5+OxnP/ux9ggAANASCTzq0K5du5gxY0bcfffd0aVLlxg5cmRcd9118fzzz9c5fvfu3TFjxoxYunRpnHTSSfnrN2zYkH9R/t7lwy9Ep02bFueff35EvPsuvM2bN8dvfvObWmtceOGF8YlPfCIeeOCBKCkpqXMfX/7yl2ut9+c//7nGmL59++aPTZ48OYYNG1Zjz3uzevXq6NmzZ43revbsGatXr46IiLfffjveeeedvY7hfeqsbvXVGQ2n1urW0FrLsiy+9rWvxbHHHhtDhgwpaI19iTqrW6F1tnTp0ujcuXOUlJTEuHHjYtasWTF48OCC1tjXqLW6FVJrM2fOjEWLFjX5b90CAAA0FYHHHpx77rnx5ptvxsMPPxynnHJKzJkzJ4YOHRozZszIj7nmmmuic+fO0bFjx7jiiitiwoQJcfnll+ePl5eXx5IlS2pc3nu3XsS77wZ89tln40tf+lJEvPsCfsyYMXHXXXfV2s/o0aNjwYIF8cADD+xxz5MnT661Xr9+/WqMmT9/fixatCjuu+++GDBgQMyYMaPgdw5G1P5s6yzLal1XyBjepc7qpoYan1qrW0Nq7corr4znn39+rx9DtK9TZ3UrpM4GDhwYS5Ysif/5n/+Jr3zlK3HRRRfFiy++WPAa+xq1Vre91drKlSvj6quvjp/+9Kd1npwdAACgNWiek5ZPWNMsyzZUaWlpnHzyyXHyySfHDTfcEP/4j/8YEydOjIsvvjgiIiZMmBAXX3xxdOrUKXr37l3rRWabNm3yJyCty7Rp02L37t2x//7756/Lsizat28f69ati6qq909MfN1118Xhhx8eY8eOjSzL8ifc/KBevXrtdb2IiAMPPDC6dOkShxxySGzfvj3OPvvseOGFF/b4bsQPz//hd6SuWbMm/27C7t27R9u2bfc6pimdfedvm3zNj0Kd1Z6/pdRQ4WY39wYKotZqz19orV111VXx8MMPx7x586Jv3771zl0MF1xwQbOs21DqrPb8hdRZhw4d8vsYPnx4PPfcc/HDH/4w7rjjjnrXaGwlX/90k6/5Uai12vPvrdYWLlwYa9asiWHDhuWPv/POOzFv3ry47bbbYseOHdG2bdt61wEAAGjJmuc3PMr2a9pLIxk8eHBs2bIl//fu3bvHQQcdFH369Gnwu893794d99xzT9x666013un3+9//PgYMGBD33ntvrdtcf/318Z3vfCfGjh3bKO8wvuCCC6K6ujqmTJlS0PgRI0bE7Nk
1f7D72GOPxac//e4PRjp06BDDhg2rNWb27Nn5MU2ptKJrk14aizrbe521TFVNfGkcaq3+WsuyLK688sp48MEH44knnogDDzzwY+/zo+rYsWOTXhqLOvtoPS3Lsgadn6Ix5co6NOmlsai1vdfaSSedFEuXLq1xf4YPHx5jx46NJUuWCDsAAIBWoXl+w6OFW7t2bXzxi1+MSy65JA4//PAoLy+P3/3ud/G9730vRo8eXfA8WZbV+VnwPXr0iF/84hexbt26uPTSS6OysrLG8S984Qsxbdq0uPLKK2vd9tprr422bdvmXwSPHTs2f2z9+vW11isvL4+ysrI699emTZsYP3583HjjjXH55ZdHp06d9np/rr766jjuuOPilltuidGjR8fPf/7zePzxx2PBggX5MV/72tfiggsuiOHDh8eIESNi6tSpsWLFihg3btxe594XqbO6FVJnmzdvjj/96U/5vy9fvjyWLFkSXbt2jf79++91/n2RWqtbIbV2xRVXxH/913/Fz3/+8ygvL8/vp7KyslFDgdZAndWtkDq77rrr4rTTTot+/frFpk2bYubMmTFnzpx49NFH9zr3vkqt1a2+WisvL691/qGysrLo1q2b8xIBAACtR0Yt27dvz6699tps6NChWWVlZdapU6ds4MCB2fXXX59t3bo1y7IsGzBgQDZ58uQ9zjF9+vQsIuq8rFq1KjvjjDOy008/vc7bLly4MIuIbOHChdny5cuziMgWL15cY8ytt96atW3bNrvnnnuyLMv2uNZNN92UZVmWPfnkk1lEZOvWrasxz+bNm7OqqqrslltuKejf5mc/+1k2cODArH379tmgQYOyBx54oNaYH/3oR9mAAQOyDh06ZEOHDs3mzp1b0Nz7GnW2Z/XV2XvrfPhy0UUXFTT/vkat7Vl9tbanfUyfPr2g+fcl6mzP6quzSy65JP+4ud9++2UnnXRS9thjjxU0975Ire1ZIc/TPuj444/Prr766oLmBgAASEEuy7KsIQEJAAAAAABAS9M85/AAAAAAAABoRAIP8jp37rzHy/z585t7e7QS6oymotZoCuqMpqLWAAAA6ucjrcj74EmgP2z//fd3gl4ahTqjqag1moI6o6moNQAAgPoJPAAAAAAAgOT5SCsAAAAAACB5Ag8AAAAAACB57Yo1cXV1dbz55ptRXl4euVyuWMsAAAAAAAAJyLIsNm3aFH369Ik2bRr/9zGKFni8+eab0a9fv2JNDwAAAAAAJGjlypXRt2/fRp+3aIFHeXl5RLy78YqKimItAwAAAAAAJGDjxo3Rr1+/fH7Q2IoWeLz3MVYVFRUCDwAAAAAAICKiaKfBcNJyAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgeQIPAAAAAAAgecUPPDatLvoS0CQ2rYp4ctK7X4vu7Yi4429foaVQl7Q279d0tmlVrH/kZ/HM//1FLL97bmxd/lbsmrM8sk07at1q69at8bvf/S62bt1a48+pKGTPKd4vWotiP9Z4LEtTId+31vy9bc33DaClS6kHp7HXbev+N5b+7OLYtu5/m3srNJNt694q6vzFDzw2CzxoJTatipj7rSYMPO6Mlv4gxb5GXdLavF/T2ea3Y8vvy2PlxrXR+7UsdqzaEO/Mey2yzTtr3Wrr1q2xaNGifODx3p9TUcieU7xftBbFfqzxWJamQr5vrfl725rvG0BLl1IPTmOv29a9Ei/8v2di27pXmnsrNJNt64tboz7SCgAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASF67oq+wfX3EluKeiASaxPZ1zbDoxohojnWhLhubewNQJBsjorrmVTveqfdW27dvL852msj27dtj27ZtezwGzatYz4E8lqVtb3WxL3xvvTYAaHopPr609MeLLRERsXPL5ti+8a/NvBeaw66tm4o6f/EDj5mjI0qKvgq0Uv/c3BsA2Af8c0TsHxFfyV/Tcf6qem/1y1/+snhbagKp75/WznMg6rKv18W+fv8BKExLf7x4981lT944KSImNedGaCZbd9X/BsOPw0daAQAAAAAAyRN4AAAAAAAAyRN4AAAAAAAAySv+OTy+9POIg0YUfRkour88H3HPZ5t40SkRcXATrwl78nK0/M8ChY9iSrx70vL1+Wu2/Z/e9Z7H4/TTT4+IdM+Fcfrpp0e3bt3qPLZ27dpk7xetRbGeA3ksS9ve6mJf+N56bQDQ9FJ8fGnpjxezI2JinHD9pOgy4OTm3gzNYOUfFkXMKt73vviBR2mXiLL9ir4MFF1pVTMsWhERzbEu1KWiuTcARVIREbvig4FHlLSt91alpaXF2lCTKC0tjY4dO+7xGDSvYj0H8liWtr3Vxb7wvfXaAKDppfj40tIfL8oiIqJDWecorejazHuhObTvVF7U+X2kFQAAAAAAkDyBBwAAAAAAkDyBBwAAAAAAkDyBBwAAAAAAkLzin7S8c6+iLwFNorx3xPET3/1adN0j4rK/fYWWQl3S2rxf07nOu6LsiP+Nfn/tFqs656Jn78poe1zbyHXuUOtWnTp1iqFDh0anTp0iImr8OQUf3v9HHQPFUezHGo9laSrk+9aav7et+b4BtHQp9eA09tqx6hMx5AtHR8eqTzT3VmgmHbsUt0ZzWZZlxZh448aNUVlZGRs2bIiKiopiLAEAAAAAACSi2LmBj7QCAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/AAAAAAAACSJ/BI3tsRccffvrbE+YCmsG3dmlj6s9ti27o1zb
0VAFqp1vBY0xruAy2LmgKAj+PtiJj8t4ufRdI4BB7Jezsi7ozGDTwacz6gKWxb91a88P9+FNvWvdXcWwGglWoNjzWt4T7QsqgpAPg43o6Ie/928bNIGofAAwAAAAAASJ7AAwAAAAAASF675t4AjWVjRKxrpHmAVO3csjG2b/xrc28DgFZo55bW8zzR4yWNpTX9vwAAaA0EHq3GPzf3BoAW4MkbL2nuLQBAi+fxEgAAWicfaQUAAAAAACRP4AEAAAAAACRP4AEAAAAAACTPOTxajSkRcXAjzPNyOB8IpOuE6++KLgMGNvc2AGiF1r+2rNWc+8LjJY2lNf2/AABoDQQerUZFRFQ10jxAqjqUVURpRdfm3gYArVCHstbzPNHjJY2lNf2/AABoDXykFQAAAAAAkDyBBwAAAAAAkDyBBwAAAAAAkDyBBwAAAAAAkDwnLU9e94i47G9fW+J8QFPoWLVfDPnCFdGxar/m3goArVRreKxpDfeBlkVNAcDH0T0ixn7gz/Dx5bIsy4ox8caNG6OysjI2bNgQFRUVxVgCAAAAAABIRLFzAx9pBQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJE/gAQAAAAAAJK9dsSbOsiwiIjZu3FisJQAAAAAAgES8lxe8lx80tqIFHmvXro2IiH79+hVrCQAAAAAAIDFr166NysrKRp+3aIFH165dIyJixYoVRdk4kIaNGzdGv379YuXKlVFRUdHc2wGagT4A6ANAhF4A6ANAxIYNG6J///75/KCxFS3waNPm3dODVFZWamBAVFRU6AWwj9MHAH0AiNALAH0AeD8/aPR5izIrAAAAAABAExJ4AAAAAAAAySta4FFSUhITJ06MkpKSYi0BJEAvAPQBQB8AIvQCQB8Ait8HclmWZUWZGQAAAAAAoIn4SCsAAAAAACB5Ag8AAAAAACB5Ag8AAAAAACB5Ag8AAAAAACB5DQ483njjjTj//POjW7du0alTpzjyyCNj4cKF+eNZlsWkSZOiT58+0bFjx/jMZz4Tf/jDH2rMsWPHjrjqqquie/fuUVZWFmeeeWa8/vrrH//eAE2ivj7w4IMPximnnBLdu3ePXC4XS5YsqTWHPgDp21sv2LVrV1xzzTXxyU9+MsrKyqJPnz5x4YUXxptvvlljDr0A0lbfc4JJkybFoEGDoqysLKqqquKzn/1sPPPMMzXm0AcgbfX1gQ+6/PLLI5fLxQ9+8IMa1+sDkL76esHFF18cuVyuxuWYY46pMYdeAGkr5DnBH//4xzjzzDOjsrIyysvL45hjjokVK1bkjzdGH2hQ4LFu3boYOXJktG/fPn71q1/Fiy++GLfeemt06dIlP+Z73/tefP/734/bbrstnnvuuejVq1ecfPLJsWnTpvyY8ePHx6xZs2LmzJmxYMGC2Lx5c5xxxhnxzjvvNGjzQNMrpA9s2bIlRo4cGTfffPMe59EHIG319YKtW7fGokWL4pvf/GYsWrQoHnzwwXjppZfizDPPrDGPXgDpKuQ5wSGHHBK33XZbLF26NBYsWBAHHHBAjBo1Kt566638GH0A0lVIH3jPQw89FM8880z06dOn1jF9ANJWaC849dRTY9WqVfnLL3/5yxrH9QJIVyF94JVXXoljjz02Bg0aFHPmzInf//738c1vfjNKS0vzYxqlD2QNcM0112THHnvsHo9XV1dnvXr1ym6++eb8ddu3b88qKyuzH//4x1mWZdn69euz9u3bZzNnzsyPeeONN7I2bdpkjz76aEO2AzSD+vrABy1fvjyLiGzx4sU1rtcHIH0N6QXvefbZZ7OIyF577bUsy/QCSN1H6QMbNmzIIiJ7/PHHsyzTByB1hfaB119/Pdt///2zF154IRswYEA2efLk/DF9ANJXSC+46KKLstGjR+/xuF4AaSukD4wZMyY7//zz93i8sfpAg37D4+GHH47hw4fHF7/4xejRo0d86lOfijvvvDN/fPny5bF69eoYNWpU/rqSkpI4/vjj46mnnoqIiIULF8auXbtqjOnTp08MGTIkPwZouerrA4XQByB9H6UXbNiwIXK5XP4dHnoBpK2hfWDnzp0xderUqKysjCOOOCIi9AFIXSF9oLq6Oi644IKYMGFCHHbYYbXm0AcgfYU+J5gzZ0706NEjDjnkkLjssstizZo1+WN6AaStvj5QXV0djzzySBxyyCFxyimnRI8ePeLoo4+Ohx56KD+msfpAgwKPP//5z3H77bfHwQcfHL/+9a9j3Lhx8dWvfjXuueeeiIhYvXp1RET07Nmzxu169uyZP7Z69ero0KFDVFVV7XEM0HLV1wcKoQ9A+hraC7Zv3x7XXntt/P3f/31UVFREhF4AqSu0D/ziF7+Izp07R2lpaUyePDlmz54d3bt3jwh9AFJXSB+45ZZbol27dvHVr361zjn0AUhfIb3gtNNOi3vvvTeeeOKJuPXWW+O5556LE088MXbs2BERegGkrr4+sGbNmti8eXPcfPPNceqpp8Zjjz0WZ599dpxzzjkxd+7ciGi8PtCuIRuvrq6O4cOHx3e/+92IiPjUpz4Vf/jDH+L222+PCy+8MD8ul8vVuF2WZbWu+7BCxgDNr9A+8FHoA5COhvSCXbt2xZe+9KWorq6OKVOm1Du3XgBpKLQPnHDCCbFkyZJ4++23484774zzzjsvnnnmmejRo8ce59YHIA319YGFCxfGD3/4w1i0aFGD/0/rA5COQp4TjBkzJj9+yJAhMXz48BgwYEA88sgjcc455+xxbr0A0lBfH6iuro6IiNGjR8e//Mu/RETEkUceGU899VT8+Mc/juOPP36Pcze0DzToNzx69+4dgwcPrnHdoYcemj+Teq9evSIiaiUua9asyf/WR69evWLnzp2xbt26PY4BWq76+kAh9AFIX6G9YNeuXXHeeefF8uXLY/bs2fnf7ojQCyB1hfaBsrKyOOigg+KYY46JadOmRbt27WLatGkRoQ9A6urrA/Pnz481a9ZE//79o127dtGuXbt47bXX4utf/3occMABEaEPQGvwUX5O0Lt37xgwYEC8/PLLEaEXQOrq6wPdu3ePdu3a1ZstNEYfaFDgMXLkyFi2bFmN61566aUYMGBAR
EQceOCB0atXr5g9e3b++M6dO2Pu3Lnx6U9/OiIihg0bFu3bt68xZtWqVfHCCy/kxwAtV319oBD6AKSvkF7wXtjx8ssvx+OPPx7dunWrMV4vgLR91OcEWZblP75CH4C01dcHLrjggnj++edjyZIl+UufPn1iwoQJ8etf/zoi9AFoDT7Kc4K1a9fGypUro3fv3hGhF0Dq6usDHTp0iKOOOmqvYxqtDxR8evMsy5599tmsXbt22b/9279lL7/8cnbvvfdmnTp1yn7605/mx9x8881ZZWVl9uCDD2ZLly7NvvzlL2e9e/fONm7cmB8zbty4rG/fvtnjjz+eLVq0KDvxxBOzI444Itu9e3dDtgM0g0L6wNq1a7PFixdnjzzySBYR2cyZM7PFixdnq1atyo/RByBt9fWCXbt2ZWeeeWbWt2/fbMmSJdmqVavylx07duTn0QsgXfX1gc2bN2ff+MY3sqeffjp79dVXs4ULF2aXXnppVlJSkr3wwgv5efQBSFchrw0+bMCAAdnkyZNrXKcPQNrq6wWbNm3Kvv71r2dPPfVUtnz58uzJJ5/MRowYke2///5+XgitRCHPCR588MGsffv22dSpU7OXX345+8///M+sbdu22fz58/NjGqMPNCjwyLIs++///u9syJAhWUlJSTZo0KBs6tSpNY5XV1dnEydOzHr16pWVlJRkxx13XLZ06dIaY7Zt25ZdeeWVWdeuXbOOHTtmZ5xxRrZixYqGbgVoJvX1genTp2cRUesyceLE/Bh9ANK3t16wfPnyOvtARGRPPvlkfpxeAGnbWx/Ytm1bdvbZZ2d9+vTJOnTokPXu3Ts788wzs2effbbGHPoApK2+1wYfVlfgoQ9A+vbWC7Zu3ZqNGjUq22+//bL27dtn/fv3zy666KJa/8/1AkhbIc8Jpk2blh100EFZaWlpdsQRR2QPPfRQjeON0QdyWZZlDfjtFAAAAAAAgBanQefwAAAAAAAAaIkEHgAAAAAAQPIEHgAAAAAAQPIEHgAAAAAAQPIEHgAAAAAAQPIEHgAAAAAAQPIEHgAAAAAAQPIEHgAAAAAAQPIEHgAAwMc2adKkOPLII5t7GwAAwD4sl2VZ1tybAAAAWq5cLrfX4xdddFHcdtttsWPHjujWrVsT7QoAAKAmgQcAALBXq1evzv/5/vvvjxtuuCGWLVuWv65jx45RWVnZHFsDAADI85FWAADAXvXq1St/qaysjFwuV+u6D3+k1cUXXxxnnXVWfPe7342ePXtGly5d4lvf+lbs3r07JkyYEF27do2+ffvGXXfdVWOtN954I8aMGRNVVVXRrVu3GD16dLz66qtNe4cBAIAkCTwAAICieOKJJ+LNN9+MefPmxfe///2YNGlSnHHGGVFVVRXPPPNMjBs3LsaNGxcrV66MiIitW7fGCSecEJ07d4558+bFggULonPnznHqqafGzp07m/neAAAALZ3AAwAAKIquXbvGf/zHf8TAgQPjkksuiYEDB8bWrVvjuuuui4MPPji+8Y1vRIcOHeK3v/1tRETMnDkz2rRpEz/5yU/ik5/8ZBx66KExffr0WLFiRcyZM6d57wwAANDitWvuDQAAAK3TYYcdFm3avP8eq549e8aQIUPyf2/btm1069Yt1qxZExERCxcujD/96U9RXl5eY57t27fHK6+80jSbBgAAkiXwAAAAiqJ9+/Y1/p7L5eq8rrq6OiIiqqurY9iwYXHvvffWmmu//fYr3kYBAIBWQeABAAC0CEOHDo37778/evToERUVFc29HQAAIDHO4QEAALQIY8eOje7du8fo0aNj/vz5sXz58pg7d25cffXV8frrrzf39gAAgBZO4AEAALQInTp1innz5kX//v3jnHPOiUMPPTQuueSS2LZtm9/4AAAA6pXLsixr7k0AAAAAAAB8HH7DAwAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASJ7AAwAAAAAASN7/B8CasjLyMJvqAAAAAElFTkSuQmCC",
"text/plain": [
""
]
@@ -544,7 +544,7 @@
"outputs": [
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAqrUlEQVR4nO3de5hVdb0/8M/AMMPcuclNbpYBIl7hd5LoydRUvARpJikiRik8J7Urx0vmJbPUE2JWWiqCFkWmQJZFXhIEQVSEE6YHUVFQQRRwYIDhNuv3hw/7OA4wMzC3Nb5ezzPPM7PWd3+/373X2p+1Z7/3XisrSZIkAAAAAAAAUqxFY08AAAAAAABgfwk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB67ccEFF0RWVlaVn1deeWWP64YMGZK5fa9evXbb5sYbb8y0WbFiRXzxi1+MgoKC6NChQ1x66aWxbdu2zPry8vK44IIL4rDDDovs7Oz40pe+VGWec+fOjcGDB0f79u0jLy8v+vbtGxMmTKjXxybt0rJtIyK2bt0aP/jBD6Jnz56Rm5sbn/zkJ+Oee+6pt8cm7XZtv7Fjx1ZZ95//+Z+RlZUVF1xwQaW2TWVb7/LUU09FdnZ2HHnkkfv1WDR3db2tIyLmzZsXp556arRt2zZat24dhx12WIwfPz527txZqd369etj5MiRUVJSEiUlJTFy5Mh4//33K7X51re+FQMGDIjc3NzdbsvXX399t3OaOXPmfj0uzdmaNWtizJgx0aNHj8jNzY3OnTvHySefHPPnz4+Iys/X/Pz86N+/f/zmN7/J3H7y5Mm7fcxbt25dZax58+ZFy5Ytq+wjEf+37RYvXpxZtnHjxvj85z8fffv2jZUrV0ZE7HasrKysmDp1akREzJo1q9Ly9u3bx/HHHx9PPfVUrR6XBx98MPr16xe5ubnRr1+/mD59eqX1d9xxRxx++OFRXFwcxcXFMWjQoPj73/9eqzEAAAAgTbIbY9D1m7ZV36gOtS3IqfVthgwZEpMmTaq07IADDtjjutzc3Ep//+hHP4oLL7yw0rKioqKIiNi5c2ecdtppccABB8TcuXNj7dq1MWrUqEiSJH7xi19k2uTl5cWll14aDz744G7nWFBQEBdffHEcfvjhUVBQEHPnzo0xY8ZEQUFBXHTRRbW+z3WhdGtpg41VkluyT7dLw7aNiDj77LPjnXfeiYkTJ8bBBx8ca9asiR07duzTfd5fW0rLG3S8vJKqb0LWRPfu3WPq1KkxYcKEyMvLi4gPQoc//OEP0aNHj0ptm9K2jogoLS2N888/P0444YR45513an/n68jOtWsbdLyW7dvv0+3qcltPnz49zj777Pja174WTzzxRLRp0yYee+yx+K//+q94+umn4/7774+srKyIiDj33HPjzTffzIQTF110UYwcOTL+8pe/ZPpLkiRGjx4dCxYsiH/96197vA+PPfZYHHrooZm/27Vrt0+Pxf7asmVLg463a3vVxpe//OXYvn173HvvvfGJT3wi3nnnnXj88cdj3bp1mTa7nq9lZWUxefLkGDt2bLRp0yaGDx8eERHFxcWxdOnSSv3u2q4fds8998Qll1wSd999d6xYsaLK/vRh7777bpxyyikR8cGHEDp06JBZN2nSpCqhSZs2bSr9vXTp0iguLo533303fvzjH8dpp50WL7/8cnTs2LHax2T+/PkxfPjwuP766+OMM87I7Mdz586NT3/60xER0a1bt7jxxhvj4IMPjoiIe++9N4YNGxaLFi2qtO8BAABAc9EogccpNz/RoOM9fd3Jtb7Nrk+Q1nbdLkVFRXts88gjj8SLL74YK1eujK5du0ZExPjx4+OCCy6IG264IYqLi6OgoCDuuOOOiPjgU98f/QRxRMRRRx0VRx11VObvXr16xbRp02LOnDmNFniM/Pu5DTbWQ196eJ9ul4ZtO3PmzJg9e3a89tprmTdBe/XqVcN7WPfuO/+BBh1vzJ/P26fbHX300fHaa6/FtGnTYsSIERERMW3atOjevXt84hOfqNS2qWzrXcaMGRPnnntutGzZMmbMmFHDe1z3Vh9+ZIOOd+BbK/fpdnW1rTdt2hQXXnhhDB06NO68887M8m984xvRqVOnGDp0aNx///0xfPjweOmll2LmzJnx9NNPZ95Qvuuuu2LQoEGxdOnS6NOnT0RE3HbbbRHxwZvhews82rdvX+0+2BB++9vfNuh4tT0+vf/++zF37tyYNWtWHHvssRER0bNnz/iP//iPSu0+/Hz98Y9/HPfff3/MmDEjE3hkZWVV+3hv2rQp7r///nj22Wdj9erVMXny5Lj66qt323blypVx4oknRpcuXeKhhx7KhKG7tGnTptrxOnbsmGl31VVXxf333x8LFiyIL37xi3u9XUTErbfeGieeeGJcccUVERFxxRVXxOzZs+PWW2+NP/zhDxERVfq54YYb4o477oinn35a4AEAAECz5JRWjWD+/PnRv3//zJukEREnn3xybN26NRYuXLjP/S5atCjmzZuXeUOIhldX2/ahhx6KgQMHxs033xwHHnhg9O7dO77//e83+Cex0+hrX/tapU/z33PPPTF69Og6H6cun8eTJk2KV199Na655pq6nmazVhfb+pFHHom1a9fG97///SrrvvjFL0bv3r0zbx7Pnz8/SkpKMmFHRMQxxxwTJSUlMW/evFrPf+jQodGxY8cYPHhwPPBAw4aKaVJYWBiFhYUxY8aM2Lp1a41v17p169i+fXutxvrjH/8Yffr0iT59+sR5550XkyZNiiRJqrRbunRpDB48OPr27RszZ86sEnbU1ubNmzP7cqtWrWp0m/nz58dJJ51UadnJJ5+8x31x586dMXXq1Ni0aVMMGjRov+YLAAAATZXAYw/++te/Zt5kKSwsjK985St7XFdYWBjXX399pdtfdtllVdrMmjUrIiJWr14dnTp1qtS+bdu2kZOTE6tXr671XLt16xa5ubkxcODA+OY3vxnf+MY3an+HP0bSsG1fe+21mDt3brzwwgsxffr0uPXWW+OBBx6Ib37zm/t+xz8mRo4cGXPnzo3XX3893njjjXjqqafivPOqfmOkqWzrZcuWxeWXXx5TpkyJ7OxG+dJdatXFtn755ZcjIuKQQw7Z7Rh9+/bNtFm9evVuTzXUsWPHWm3zwsLCuOWWW+KBBx6Iv/3tb3HCCSfE8OHD43e/+12N+/g4yc7OjsmTJ8e9994bbdq0icGDB8eVV165x2/P7NixIyZPnhxLliyJE044IbO8tLS0yn7w0cBg4sSJmX1oyJAhUVZWFo8//niVMc4///z45Cc/GQ8++GCVU+Htcs4551QZ77XXXqvUplu3bpl1EyZMiAEDBlSa897srgZ16tS
pyr64ZMmSKCwsjNzc3Bg7dmxMnz49+vXrV6MxAAAAIG28u7YHxx13XOZUNBEfXC9jT+siqp57fdy4cZmL5u5y4IEHZn7f3XnDkyTZ7fLqzJkzJ8rKyuLpp5+Oyy+/PA4++OA455xzat3Px0Uatm1FRUVkZWXFlClToqTkg2uV3HLLLXHWWWfFr371q306B/7HRYcOHeK0006Le++9N5IkidNOO63SefV3aQrbeufOnXHuuefGddddF717967Rbfg/dbmtd/cp/l3LP7w96+L53aFDh/jOd76T+XvgwIGxfv36uPnmm3cb2PDBNTxOO+20mDNnTsyfPz9mzpwZN998c9x9992Z5+hll10WV111VWzdujVycnJi3LhxMWbMmEwfRUVF8fzzz1fq98O1dOnSpfHMM8/EtGnTIuKDoGX48OFxzz33xBe+8IVKtxs2bFhMnz49HnzwwTj77LN3O+cJEyZUuV337t0r/T1nzpwoKCiIRYsWxWWXXRaTJ0+u8Tc8Iqruj7vbF/v06ROLFy+O999/Px588MEYNWpUzJ49W+gBAABAs9Qogcff/+u4xhi2VgoKCjIX+azNul06dOiwxzadO3eOBQsWVFq2fv362L59e5VPa9bEQQcdFBERhx12WLzzzjtx7bXXNlrg8dtTft8o49ZGGrZtly5d4sADD8yEHREffAI9SZJ4880341Of+lSN+6oL5993VoOOt79Gjx4dF198cURE/OpXv9ptm6awrTdu3BjPPfdcLFq0KDPfioqKSJIksrOz45FHHonjjz++Rn3Vlc7/Wtyg4+2v/d3Wu4Kml156KT7zmc9UWf+///u/mTeGO3fuvNsLyr/77rv7VLs/7Jhjjom77757v/rYVyNHjmyUcWurdevWceKJJ8aJJ54YV199dXzjG9+Ia665JhN47Aoo8/Pzo0uXLlXe+G/RosVen/MTJ06MHTt2VAo1kySJVq1axfr166Nt27aZ5VdeeWUcfvjhMWLEiEiSJHOdkA/r3LlztTXmoIMOijZt2kTv3r2jvLw8zjjjjHjhhRf2+K2Rj/b/0W9zrFmzpsq+mJOTk5nHwIED49lnn42f//zn8Zvf/KbaMQAAACBtGiXwaFuQ0xjDNhmDBg2KG264IVatWhVdunSJiA/OI5+bmxsDBgzYr76TJKnVOc7rWkluSfWNmrG62raDBw+OP/3pT1FWVhaFhYUR8cGpd1q0aBHdunWrl7nvTV5J6wYfc38MGTIktm3bFhEfnNO+PtTFti4uLo4lS5ZUWnb77bfHP//5z3jggQcyYWZDatm+fYOPuT/2d1ufdNJJ0a5duxg/fnyVwOOhhx6KZcuWZU5/NWjQoCgtLY1nnnkmc8HsBQsWRGlp6W7DktpYtGhRZj9qaGn9xli/fv1ixowZmb/3FlBWZ8eOHXHffffF+PHjq5zm6stf/nJMmTIlE6ztctVVV0V2dnaMGDEiKioq9vuDBiNHjowf/ehHcfvtt1f6BtCeDBo0KB599NFKbR955JFq98XGfp0AAAAA9ckprfbB1q1bq3yqMjs7u9KpVDZu3FilTX5+fhQXF8dJJ50U/fr1i5EjR8Z///d/x7p16+L73/9+XHjhhVFcXJxp/+KLL8a2bdti3bp1sXHjxli8eHFERBx55JER8cGnmXv06BF9+/aNiIi5c+fGz372s7jkkkvq4V5/PDSVbXvuuefG9ddfH1/72tfiuuuui/feey/GjRsXo0ePTu2bkw2pZcuW8dJLL2V+352msK1btGgR/fv3r9R/x44do3Xr1lWWs3v7u60LCgriN7/5TXz1q1+Niy66KC6++OIoLi6Oxx9/PMaNGxdnnXVW5pRFhxxySAwZMiQuvPDCzKfjL7roojj99NOjT58+mb5feeWVKCsri9WrV8eWLVsy27xfv36Rk5MT9957b7Rq1SqOOuqoaNGiRfzlL3+J2267LW666aa6fniahbVr18ZXvvKVGD16dBx++OFRVFQUzz33XNx8880xbNiwGveTJMlur7XSsWPH+Otf/xrr16+Pr3/965W+WRcRcdZZZ8XEiROrBB4REZdffnm0bNkyRo4cGRUVFTFixIjMuvfff7/KeEVFRZVOo/hhLVq0iG9/+9vx4x//OMaMGRP5+fl7vT/f+ta34nOf+1zcdNNNMWzYsPjzn/8cjz32WMydOzfT5sorr4xTTjklunfvHhs3boypU6fGrFmzYubMmXvtGwAAAFIroYpRo0Ylw4YN2+O6iKjy06dPn0ybnj177rbNmDFjMm3eeOON5LTTTkvy8vKSdu3aJRdffHFSXl5eaaw99bPLbbfdlhx66KFJfn5+UlxcnBx11FHJ7bffnuzcubNuH5BmJC3bNkmS5KWXXkq+8IUvJHl5eUm3bt2S7373u8nmzZvr7sFoZva2bZMkSYYNG5aMGjUq07YpbesPu+aaa5Ijjjhinx6Dj4u63tZJkiRPPvlkMmTIkKSkpCTJyclJ+vXrl/zsZz9LduzYUand2rVrkxEjRiRFRUVJUVFRMmLEiGT9+vWV2hx77LG7HXP58uVJkiTJ5MmTk0MOOSTJz89PioqKkgEDBiS//e1v9/dhabbKy8uTyy+/PDn66KOTkpKSJD8/P+nTp09y1VVXZWpiz549kwkTJuyxj0mTJu12m0REsmrVquT0009PTj311N3eduHChUlEJAsXLkyWL1+eRESyaNGiSm3Gjx+ftGzZMrnvvvuSJEn2ONZPf/rTJEmS5Iknnkgiosq+U1ZWlrRt2za56aabavTY/OlPf0r69OmTtGrVKunbt2/y4IMPVlo/evTopGfPnklOTk5ywAEHJCeccELyyCOP1KhvAAAASKOsJNnDlVoBAAAAAABSokVjTwAAAAAAAGB/CTwAAJqYwsLCPf7MmTOnsacHAAAATZJTWgEANDGvvPLKHtcdeOCBkZeX14CzAQAAgHQQeAAAAAAAAKnnlFYAAAAAAEDqCTwAAAAAAIDUy66vjisqKuLtt9+OoqKiyMrKqq9hAAAAAACAFEiSJDZu3Bhdu3aNFi3q/vsY9RZ4vP3229G9e/f66h4AAAAAAEihlStXRrdu3eq833oLPIqKiiLig4kXFxfX1zAAAAAAAEAKbNiwIbp3757JD+pavQUeu05jVVxcLPAAAAAAAAAiIurtMhguWg4AAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAA
AAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUq/eA49N6zbX9xDQIDat2xzP/eF/GmSfXle+Ln7/0pRYV76u3seCmrJf0tx8eJ9eu2JpzPnuqJh13bXx729eHBueey42jL8ldr7zTpXbbd68OZ577rnYvHlzpd/ToiZzTuP9onmo72ONY1k61WS7Nedt25zvG0BTl6YanJa5vvLe6rj0/unxynurG3sqNJK1G7fWa//1Hnhseb+8voeABrF5/ZZYOHVJbF6/pd7HWl++LqYu/X2sb+IHKT5e7Jc0Nx/ep0vffDXaz34xVkdWtJnx59jy4oux8ZYJsXPNmiq327x5czz//POZwGPX72lRkzmn8X7RPNT3scaxLJ1qst2a87ZtzvcNoKlLUw1Oy1xfX7s2nvl3fry+dm1jT4VGsrYs5YEHAAAAAABAfRN4AAAAAAAAqSfwAAAAAAAAUi+7vgfYumlbbCl1HQ/Sb2vZtgYfs2xbWZRuLW3wcWF3yraVNfYUoF6UbSuLlts3V/oUSFJW/f5eXp7u1zfl5eWxZcvur0uV9vtG+tXXayDHsnTb237xcdi2/jcAaHhpPL409ePFlh0fXCdwU/nOWL+p4d9ro/Ft3LKjXvuv98DjHzfMjrxWefU9DDRLP5z3g8aeAkCz98N5P4juKzbHNz+0bOcNP632dn/729/qb1INIO3zp3nzGojd+bjvFx/3+w9AzTT148X2zQdExDnx02mr4qexqrGnQyPYsXVTvfbvlFYAAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpV+/X8Dj5B8dGr/496nsYqHdrX18fD1/9eIOOef1nboheJQc16JiwJ6+XLm/y5wKFfXH9Z26Ilm1fiYibMsta/uCKaq/jceqpp0ZEeq+Fceqpp0b79u13u27t2rWpvV80D/X1GsixLN32tl98HLat/w0AGl4ajy9N/Xjx5Csvxw1L18UVZ3aJzx3ct7GnQyNYvOztOP6m6tvtq3oPPHILciKvpHV9DwP1Lrcwp8HHLMwpjJLckgYfF3anMKewsacA9aIwpzCyWuVH+YeWZRVWv7+3bp3u1zetW7eOvLy8Pa6DxlRfr4Ecy9Jtb/vFx2Hb+t8AoOGl8fjS1I8Xedn5EbEuClq3jLYFDf9eG42vKK9+IwmntAIAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB69X7R8rw2LnpJ85DfNi8GfPWwyG+7+wu81qW2rdvFV/ucG21bt6v3saCm7Jc0Nx/ep5Nun4y3ju0XnSOJ9780LLr36xfx3e9Ey44dq9wuPz8/jj766MjPz4+IqPR7Gnx0/vvaBupDfR9rHMvSqSbbrTlv2+Z83wCaujTV4LTMtVf79vEfh74cvdq3b+yp0EjaF+bWa/9ZSZIk9dHxhg0boqSkJEpLS6O4uLg+hgAAAAAAAFKivnMDp7QCAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AI+XWla+L3780JdaVr2uS/QEN472NW+OuJ16J9zZubeypANBMNYdjTXO4DzQt9ikA2HfrytfFxCV3x8Qld3svkjoj8Ei59eXrYurS38f6OioKdd0f0DDe27g1Js561T/bANSb5nCsaQ73gabFPgUA+259+br486vT48+vTvdeJHVG4AEAAAAAAKSewAMAAAAAAEi97MaeAHWjbFtZlG4trZN+gPTauGV7rN+0rbGnAUAztHHL9saeQp1xvKSuNKfnBQBAcyDwaCZ+OO8HjT0FoAm45L7nGnsKANDkOV4CAEDz5JRWAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqecaHs3E9Z+5IXqVHLTf/bxeutz1QCDFfnH+wDi4c1FjTwOAZuiV1RubzbUvHC+pK83peQEA0BwIPJqJwpzCKMktqZN+gPQqymsVbQtyGnsaADRDRXmtGnsKdcbxkrrSnJ4XAADNgVNaAQAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD0XLU+5tq3bxVf7nBttW7drkv0BDaNDUW58/fOfjA5FuY09FQCaqeZwrGkO94GmxT4FAPuubet2MeyTZ2R+h7qQlSRJUh8db9iwIUpKSqK0tDSKi4vrYwgAAAAAACAl6js3cEorAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1suur4yRJIiJiw4YN9TUEAAAAAACQErvygl35QV2rt8Bj7dq1ERHRvXv3+hoCAAAAAABImbVr10ZJSUmd91tvgUe7du0iImLFihX1MnEgHTZs2BDdu3ePlStXRnFxcWNPB2gE6gCgDgARagGgDgARpaWl0aNHj0x+UNfqLfBo0eKDy4OUlJQoYEAUFxerBfAxpw4A6gAQoRYA6gDwf/lBnfdbL70CAAAAAAA0IIEHAAAAAACQevUWeOTm5sY111wTubm59TUEkAJqAaAOAOoAEKEWAOoAUP91ICtJkqReegYAAAAAAGggTmkFAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9WodeLz11ltx3nnnRfv27SM/Pz+OPPLIWLhwYWZ9kiRx7bXXRteuXSMvLy8+//nPx7///e9KfWzdujUuueSS6NChQxQUFMTQoUPjzTff3P97A
zSI6urAtGnT4uSTT44OHTpEVlZWLF68uEof6gCk395qwfbt2+Oyyy6Lww47LAoKCqJr165x/vnnx9tvv12pD7UA0q261wTXXntt9O3bNwoKCqJt27bxhS98IRYsWFCpD3UA0q26OvBhY8aMiaysrLj11lsrLVcHIP2qqwUXXHBBZGVlVfo55phjKvWhFkC61eQ1wUsvvRRDhw6NkpKSKCoqimOOOSZWrFiRWV8XdaBWgcf69etj8ODB0apVq/j73/8eL774YowfPz7atGmTaXPzzTfHLbfcEr/85S/j2Wefjc6dO8eJJ54YGzduzLT59re/HdOnT4+pU6fG3Llzo6ysLE4//fTYuXNnrSYPNLya1IFNmzbF4MGD48Ybb9xjP+oApFt1tWDz5s3x/PPPxw9/+MN4/vnnY9q0afHyyy/H0KFDK/WjFkB61eQ1Qe/eveOXv/xlLFmyJObOnRu9evWKk046Kd59991MG3UA0qsmdWCXGTNmxIIFC6Jr165V1qkDkG41rQVDhgyJVatWZX7+9re/VVqvFkB61aQOvPrqq/HZz342+vbtG7NmzYr/+Z//iR/+8IfRunXrTJs6qQNJLVx22WXJZz/72T2ur6ioSDp37pzceOONmWXl5eVJSUlJ8utf/zpJkiR5//33k1atWiVTp07NtHnrrbeSFi1aJDNnzqzNdIBGUF0d+LDly5cnEZEsWrSo0nJ1ANKvNrVgl2eeeSaJiOSNN95IkkQtgLTblzpQWlqaRETy2GOPJUmiDkDa1bQOvPnmm8mBBx6YvPDCC0nPnj2TCRMmZNapA5B+NakFo0aNSoYNG7bH9WoBpFtN6sDw4cOT8847b4/r66oO1OobHg899FAMHDgwvvKVr0THjh3jqKOOirvuuiuzfvny5bF69eo46aSTMstyc3Pj2GOPjXnz5kVExMKFC2P79u2V2nTt2jX69++faQM0XdXVgZpQByD99qUWlJaWRlZWVuYTHmoBpFtt68C2bdvizjvvjJKSkjjiiCMiQh2AtKtJHaioqIiRI0fGuHHj4tBDD63ShzoA6VfT1wSzZs2Kjh07Ru/evePCCy+MNWvWZNapBZBu1dWBioqKePjhh6N3795x8sknR8eOHePTn/50zJgxI9OmrupArQKP1157Le6444741Kc+Ff/4xz9i7Nixcemll8Z9990XERGrV6+OiIhOnTpVul2nTp0y61avXh05OTnRtm3bPbYBmq7q6kBNqAOQfrWtBeXl5XH55ZfHueeeG8XFxRGhFkDa1bQO/PWvf43CwsJo3bp1TJgwIR599NHo0KFDRKgDkHY1qQM33XRTZGdnx6WXXrrbPtQBSL+a1IJTTjklpkyZEv/85z9j/Pjx8eyzz8bxxx8fW7dujQi1ANKuujqwZs2aKCsrixtvvDGGDBkSjzzySJxxxhlx5plnxuzZsyOi7upAdm0mXlFREQMHDoyf/OQnERFx1FFHxb///e+444474vzzz8+0y8rKqnS7JEmqLPuomrQBGl9N68C+UAcgPWpTC7Zv3x5f/epXo6KiIm6//fZq+1YLIB1qWgeOO+64WLx4cbz33ntx1113xdlnnx0LFiyIjh077rFvdQDSobo6sHDhwvj5z38ezz//fK2f0+oApEdNXhMMHz48075///4xcODA6NmzZzz88MNx5pln7rFvtQDSobo6UFFRERERw4YNi+985zsREXHkkUfGvHnz4te//nUce+yxe+y7tnWgVt/w6NKlS/Tr16/SskMOOSRzJfXOnTtHRFRJXNasWZP51kfnzp1j27ZtsX79+j22AZqu6upATagDkH41rQXbt2+Ps88+O5YvXx6PPvpo5tsdEWoBpF1N60BBQUEcfPDBccwxx8TEiRMjOzs7Jk6cGBHqAKRddXVgzpw5sWbNmujRo0dkZ2dHdnZ2vPHGG/G9730vevXqFRHqADQH+/I+QZcuXaJnz56xbNmyiFALIO2qqwMdOnSI7OzsarOFuqgDtQo8Bg8eHEuXLq207OWXX46ePXtGRMRBBx0UnTt3jkcffTSzftu2bTF79uz4zGc+ExERAwYMiFatWlVqs2rVqnjhhRcybYCmq7o6UBPqAKRfTWrBrrBj2bJl8dhjj0X79u0rtVcLIN329TVBkiSZ01eoA5Bu1dWBkSNHxr/+9a9YvHhx5qdr164xbty4+Mc//hER6gA0B/vymmDt2rWxcuXK6NKlS0SoBZB21dWBnJyc+H//7//ttU2d1YEaX948SZJnnnkmyc7OTm644YZk2bJlyZQpU5L8/Pzkd7/7XabNjTfemJSUlCTTpk1LlixZkpxzzjlJly5dkg0bNmTajB07NunWrVvy2GOPJc8//3xy/PHHJ0cccUSyY8eO2kwHaAQ1qQNr165NFi1alDz88MNJRCRTp05NFi1alKxatSrTRh2AdKuuFmzfvj0ZOnRo0q1bt2Tx4sXJqlWrMj9bt27N9KMWQHpVVwfKysqSK664Ipk/f37y+uuvJwsXLky+/vWvJ7m5uckLL7yQ6UcdgPSqyf8GH9WzZ89kwoQJlZapA5Bu1dWCjRs3Jt/73veSefPmJcuXL0+eeOKJZNCgQcmBBx7o/UJoJmrymmDatGlJq1atkjvvvDNZtmxZ8otf/CJp2bJlMmfOnEybuqgDtQo8kiRJ/vKXvyT9+/dPcnNzk759+yZ33nlnpfUVFRXJNddck3Tu3DnJzc1NPve5zyVLliyp1GbLli3JxRdfnLRr1y7Jy8tLTj/99GTFihW1nQrQSKqrA5MmTUoiosrPNddck2mjDkD67a0WLF++fLd1ICKSJ554ItNOLYB021sd2LJlS3LGGWckXbt2TXJycpIuXbokQ4cOTZ555plKfagDkG7V/W/wUbsLPNQBSL+91YLNmzcnJ510UnLAAQckrVq1Snr06JGMGjWqyvNcLYB0q8lrgokTJyYHH3xw0rp16+SII45IZsyYUWl9XdSBrCRJklp8OwUAAAAAAKDJqdU1PAAAAAAAAJoigQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAADYb9dee20ceeSRjT0NAADgYywrSZKksScBAAA0XVlZWXtdP2rUqPjlL38ZW7dujfbt2zfQrAAAACoTeAAAAHu1evXqzO9//OMf4+qrr46lS5dmluXl5UVJSUljTA0AACDDKa0AAIC96ty5c+anpKQksrKyqiz76CmtLrjggvjSl74UP/nJT6JTp07Rpk2buO6662LHjh0xbty4aNeuXXTr1i3uueeeSmO99dZbMXz48Gjbtm20b98+hg0bFq+//nrD3mEAACCVBB4AAEC9+Oc//xlvv/12PPnkk3HLLbfEtddeG6effnq0bds2FixYEGPHjo2xY8fGypUrIyJi8+bNcdxxx0VhYWE8+eSTMXfu3CgsLIwhQ4bEtm3bGvneAAAATZ3AAwAAqBft2rWL2267Lfr06ROjR4+OPn36xObNm+PKK6+MT33qU3HFFVdE
Tk5OPPXUUxERMXXq1GjRokXcfffdcdhhh8UhhxwSkyZNihUrVsSsWbMa984AAABNXnZjTwAAAGieDj300GjR4v8+Y9WpU6fo379/5u+WLVtG+/btY82aNRERsXDhwnjllVeiqKioUj/l5eXx6quvNsykAQCA1BJ4AAAA9aJVq1aV/s7KytrtsoqKioiIqKioiAEDBsSUKVOq9HXAAQfU30QBAIBmQeABAAA0CUcffXT88Y9/jI4dO0ZxcXFjTwcAAEgZ1/AAAACahBEjRkSHDh1i2LBhMWfOnFi+fHnMnj07vvWtb8Wbb77Z2NMDAACaOIEHAADQJOTn58eTTz4ZPXr0iDPPPDMOOeSQGD16dGzZssU3PgAAgGplJUmSNPYkAAAAAAAA9odveAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACk3v8HUEYGyV77YKkAAAAASUVORK5CYII=\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAqrUlEQVR4nO3de5hVdb0/8M/AMMPcuclNbpYBIl7hd5LoydRUvARpJikiRik8J7Urx0vmJbPUE2JWWiqCFkWmQJZFXhIEQVSEE6YHUVFQQRRwYIDhNuv3hw/7OA4wMzC3Nb5ezzPPM7PWd3+/373X2p+1Z7/3XisrSZIkAAAAAAAAUqxFY08AAAAAAABgfwk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB67ccEFF0RWVlaVn1deeWWP64YMGZK5fa9evXbb5sYbb8y0WbFiRXzxi1+MgoKC6NChQ1x66aWxbdu2zPry8vK44IIL4rDDDovs7Oz40pe+VGWec+fOjcGDB0f79u0jLy8v+vbtGxMmTKjXxybt0rJtIyK2bt0aP/jBD6Jnz56Rm5sbn/zkJ+Oee+6pt8cm7XZtv7Fjx1ZZ95//+Z+RlZUVF1xwQaW2TWVb7/LUU09FdnZ2HHnkkfv1WDR3db2tIyLmzZsXp556arRt2zZat24dhx12WIwfPz527txZqd369etj5MiRUVJSEiUlJTFy5Mh4//33K7X51re+FQMGDIjc3NzdbsvXX399t3OaOXPmfj0uzdmaNWtizJgx0aNHj8jNzY3OnTvHySefHPPnz4+Iys/X/Pz86N+/f/zmN7/J3H7y5Mm7fcxbt25dZax58+ZFy5Ytq+wjEf+37RYvXpxZtnHjxvj85z8fffv2jZUrV0ZE7HasrKysmDp1akREzJo1q9Ly9u3bx/HHHx9PPfVUrR6XBx98MPr16xe5ubnRr1+/mD59eqX1d9xxRxx++OFRXFwcxcXFMWjQoPj73/9eqzEAAAAgTbIbY9D1m7ZV36gOtS3IqfVthgwZEpMmTaq07IADDtjjutzc3Ep//+hHP4oLL7yw0rKioqKIiNi5c2ecdtppccABB8TcuXNj7dq1MWrUqEiSJH7xi19k2uTl5cWll14aDz744G7nWFBQEBdffHEcfvjhUVBQEHPnzo0xY8ZEQUFBXHTRRbW+z3WhdGtpg41VkluyT7dLw7aNiDj77LPjnXfeiYkTJ8bBBx8ca9asiR07duzTfd5fW0rLG3S8vJKqb0LWRPfu3WPq1KkxYcKEyMvLi4gPQoc//OEP0aNHj0ptm9K2jogoLS2N888/P0444YR45513an/n68jOtWsbdLyW7dvv0+3qcltPnz49zj777Pja174WTzzxRLRp0yYee+yx+K//+q94+umn4/7774+srKyIiDj33HPjzTffzIQTF110UYwcOTL+8pe/ZPpLkiRGjx4dCxYsiH/96197vA+PPfZYHHrooZm/27Vrt0+Pxf7asmVLg463a3vVxpe//OXYvn173HvvvfGJT3wi3nnnnXj88cdj3bp1mTa7nq9lZWUxefLkGDt2bLRp0yaGDx8eERHFxcWxdOnSSv3u2q4fds8998Qll1wSd999d6xYsaLK/vRh7777bpxyyikR8cGHEDp06JBZN2nSpCqhSZs2bSr9vXTp0iguLo533303fvzjH8dpp50WL7/8cnTs2LHax2T+/PkxfPjwuP766+OMM87I7Mdz586NT3/60xER0a1bt7jxxhvj4IMPjoiIe++9N4YNGxaLFi2qtO8BAABAc9EogccpNz/RoOM9fd3Jtb7Nrk+Q1nbdLkVFRXts88gjj8SLL74YK1eujK5du0ZExPjx4+OCCy6IG264IYqLi6OgoCDuuOOOiPjgU98f/QRxRMRRRx0VRx11VObvXr16xbRp02LOnDmNFniM/Pu5DTbWQ196eJ9ul4ZtO3PmzJg9e3a89tprmTdBe/XqVcN7WPfuO/+BBh1vzJ/P26fbHX300fHaa6/FtGnTYsSIERERMW3atOjevXt84hOfqNS2qWzrXcaMGRPnnntutGzZMmbMmFHDe1z3Vh9+ZIOOd+BbK/fpdnW1rTdt2hQXXnhhDB06NO68887M8m984xvRqVOnGDp0aNx///0xfPjweOmll2LmzJnx9NNPZ95Qvuuuu2LQoEGxdOnS6NOnT0RE3HbbbRHxwZvhews82rdvX+0+2BB++9vfNuh4tT0+vf/++zF37tyYNWtWHHvssRER0bNnz/iP//iPSu0+/Hz98Y9/HPfff3/MmDEjE3hkZWVV+3hv2rQp7r///nj22Wdj9erVMXny5Lj66qt323blypVx4oknRpcuXeKhhx7KhKG7tGnTptrxOnbsmGl31VVXxf333x8LFiyIL37xi3u9XUTErbfeGieeeGJcccUVERFxxRVXxOzZs+PWW2+NP/zhDxERVfq54YYb4o477oinn35a4AEAAECz5JRWjWD+/PnRv3//zJukEREnn3xybN26NRYuXLjP/S5atCjmzZuXeUOIhldX2/ahhx6KgQMHxs033xwHHnhg9O7dO77//e83+Cex0+hrX/tapU/z33PPPTF69Og6H6cun8eTJk2KV199Na655pq6nmazVhfb+pFHHom1a9fG97///SrrvvjFL0bv3r0zbx7Pnz8/SkpKMmFHRMQxxxwTJSUlMW/evFrPf+jQodGxY8cYPHhwPPBAw4aKaVJYWBiFhYUxY8aM2Lp1a41v17p169i+fXutxvrjH/8Yffr0iT59+sR5550XkyZNiiRJqrRbunRpDB48OPr27RszZ86sEnbU1ubNmzP7cqtWrWp0m/nz58dJJ51UadnJJ5+8x31x586dMXXq1Ni0aVMMGjRov+YLAAAATZXAYw/++te/Zt5kKSwsjK985St7XFdYWBjXX399pdtfdtllVdrMmjUrIiJWr14dnTp1qtS+bdu2kZOTE6tXr671XLt16xa5ubkxcODA+OY3vxnf+MY3an+HP0bSsG1fe+21mDt3brzwwgsxffr0uPXWW+OBBx6Ib37zm/t+xz8mRo4cGXPnzo3XX3893njjjXjqqafivPOqfmOkqWzrZcuWxeWXXx5TpkyJ7OxG+dJdatXFtn755ZcjIuKQQw7Z7Rh9+/bNtFm9evVuTzXUsWPHWm3zwsLCuOWWW+KBBx6Iv/3tb3HCCSfE8OHD43e/+12N+/g4yc7OjsmTJ8e9994bbdq0icGDB8eVV165x2/P7NixIyZPnhxLliyJE044IbO8tLS0yn7w0cBg4sSJmX1oyJAhUVZWFo8//niVMc4///z45Cc/GQ8++GCVU+Htcs4551QZ77XXXqvUplu3bpl1EyZMiAEDBlSa897srgZ16tS
pyr64ZMmSKCwsjNzc3Bg7dmxMnz49+vXrV6MxAAAAIG28u7YHxx13XOZUNBEfXC9jT+siqp57fdy4cZmL5u5y4IEHZn7f3XnDkyTZ7fLqzJkzJ8rKyuLpp5+Oyy+/PA4++OA455xzat3Px0Uatm1FRUVkZWXFlClToqTkg2uV3HLLLXHWWWfFr371q306B/7HRYcOHeK0006Le++9N5IkidNOO63SefV3aQrbeufOnXHuuefGddddF717967Rbfg/dbmtd/cp/l3LP7w96+L53aFDh/jOd76T+XvgwIGxfv36uPnmm3cb2PDBNTxOO+20mDNnTsyfPz9mzpwZN998c9x9992Z5+hll10WV111VWzdujVycnJi3LhxMWbMmEwfRUVF8fzzz1fq98O1dOnSpfHMM8/EtGnTIuKDoGX48OFxzz33xBe+8IVKtxs2bFhMnz49HnzwwTj77LN3O+cJEyZUuV337t0r/T1nzpwoKCiIRYsWxWWXXRaTJ0+u8Tc8Iqruj7vbF/v06ROLFy+O999/Px588MEYNWpUzJ49W+gBAABAs9Qogcff/+u4xhi2VgoKCjIX+azNul06dOiwxzadO3eOBQsWVFq2fv362L59e5VPa9bEQQcdFBERhx12WLzzzjtx7bXXNlrg8dtTft8o49ZGGrZtly5d4sADD8yEHREffAI9SZJ4880341Of+lSN+6oL5993VoOOt79Gjx4dF198cURE/OpXv9ptm6awrTdu3BjPPfdcLFq0KDPfioqKSJIksrOz45FHHonjjz++Rn3Vlc7/Wtyg4+2v/d3Wu4Kml156KT7zmc9UWf+///u/mTeGO3fuvNsLyr/77rv7VLs/7Jhjjom77757v/rYVyNHjmyUcWurdevWceKJJ8aJJ54YV199dXzjG9+Ia665JhN47Aoo8/Pzo0uXLlXe+G/RosVen/MTJ06MHTt2VAo1kySJVq1axfr166Nt27aZ5VdeeWUcfvjhMWLEiEiSJHOdkA/r3LlztTXmoIMOijZt2kTv3r2jvLw8zjjjjHjhhRf2+K2Rj/b/0W9zrFmzpsq+mJOTk5nHwIED49lnn42f//zn8Zvf/KbaMQAAACBtGiXwaFuQ0xjDNhmDBg2KG264IVatWhVdunSJiA/OI5+bmxsDBgzYr76TJKnVOc7rWkluSfWNmrG62raDBw+OP/3pT1FWVhaFhYUR8cGpd1q0aBHdunWrl7nvTV5J6wYfc38MGTIktm3bFhEfnNO+PtTFti4uLo4lS5ZUWnb77bfHP//5z3jggQcyYWZDatm+fYOPuT/2d1ufdNJJ0a5duxg/fnyVwOOhhx6KZcuWZU5/NWjQoCgtLY1nnnkmc8HsBQsWRGlp6W7DktpYtGhRZj9qaGn9xli/fv1ixowZmb/3FlBWZ8eOHXHffffF+PHjq5zm6stf/nJMmTIlE6ztctVVV0V2dnaMGDEiKioq9vuDBiNHjowf/ehHcfvtt1f6BtCeDBo0KB599NFKbR955JFq98XGfp0AAAAA9ckprfbB1q1bq3yqMjs7u9KpVDZu3FilTX5+fhQXF8dJJ50U/fr1i5EjR8Z///d/x7p16+L73/9+XHjhhVFcXJxp/+KLL8a2bdti3bp1sXHjxli8eHFERBx55JER8cGnmXv06BF9+/aNiIi5c+fGz372s7jkkkvq4V5/PDSVbXvuuefG9ddfH1/72tfiuuuui/feey/GjRsXo0ePTu2bkw2pZcuW8dJLL2V+352msK1btGgR/fv3r9R/x44do3Xr1lWWs3v7u60LCgriN7/5TXz1q1+Niy66KC6++OIoLi6Oxx9/PMaNGxdnnXVW5pRFhxxySAwZMiQuvPDCzKfjL7roojj99NOjT58+mb5feeWVKCsri9WrV8eWLVsy27xfv36Rk5MT9957b7Rq1SqOOuqoaNGiRfzlL3+J2267LW666aa6fniahbVr18ZXvvKVGD16dBx++OFRVFQUzz33XNx8880xbNiwGveTJMlur7XSsWPH+Otf/xrr16+Pr3/965W+WRcRcdZZZ8XEiROrBB4REZdffnm0bNkyRo4cGRUVFTFixIjMuvfff7/KeEVFRZVOo/hhLVq0iG9/+9vx4x//OMaMGRP5+fl7vT/f+ta34nOf+1zcdNNNMWzYsPjzn/8cjz32WMydOzfT5sorr4xTTjklunfvHhs3boypU6fGrFmzYubMmXvtGwAAAFIroYpRo0Ylw4YN2+O6iKjy06dPn0ybnj177rbNmDFjMm3eeOON5LTTTkvy8vKSdu3aJRdffHFSXl5eaaw99bPLbbfdlhx66KFJfn5+UlxcnBx11FHJ7bffnuzcubNuH5BmJC3bNkmS5KWXXkq+8IUvJHl5eUm3bt2S7373u8nmzZvr7sFoZva2bZMkSYYNG5aMGjUq07YpbesPu+aaa5Ijjjhinx6Dj4u63tZJkiRPPvlkMmTIkKSkpCTJyclJ+vXrl/zsZz9LduzYUand2rVrkxEjRiRFRUVJUVFRMmLEiGT9+vWV2hx77LG7HXP58uVJkiTJ5MmTk0MOOSTJz89PioqKkgEDBiS//e1v9/dhabbKy8uTyy+/PDn66KOTkpKSJD8/P+nTp09y1VVXZWpiz549kwkTJuyxj0mTJu12m0REsmrVquT0009PTj311N3eduHChUlEJAsXLkyWL1+eRESyaNGiSm3Gjx+ftGzZMrnvvvuSJEn2ONZPf/rTJEmS5Iknnkgiosq+U1ZWlrRt2za56aabavTY/OlPf0r69OmTtGrVKunbt2/y4IMPVlo/evTopGfPnklOTk5ywAEHJCeccELyyCOP1KhvAAAASKOsJNnDlVoBAAAAAABSokVjTwAAAAAAAGB/CTwAAJqYwsLCPf7MmTOnsacHAAAATZJTWgEANDGvvPLKHtcdeOCBkZeX14CzAQAAgHQQeAAAAAAAAKnnlFYAAAAAAEDqCTwAAAAAAIDUy66vjisqKuLtt9+OoqKiyMrKqq9hAAAAAACAFEiSJDZu3Bhdu3aNFi3q/vsY9RZ4vP3229G9e/f66h4AAAAAAEihlStXRrdu3eq833oLPIqKiiLig4kXFxfX1zAAAAAAAEAKbNiwIbp3757JD+pavQUeu05jVVxcLPAAAAAAAAAiIurtMhguWg4AAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAA
AAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUq/eA49N6zbX9xDQIDat2xzP/eF/GmSfXle+Ln7/0pRYV76u3seCmrJf0tx8eJ9eu2JpzPnuqJh13bXx729eHBueey42jL8ldr7zTpXbbd68OZ577rnYvHlzpd/ToiZzTuP9onmo72ONY1k61WS7Nedt25zvG0BTl6YanJa5vvLe6rj0/unxynurG3sqNJK1G7fWa//1Hnhseb+8voeABrF5/ZZYOHVJbF6/pd7HWl++LqYu/X2sb+IHKT5e7Jc0Nx/ep0vffDXaz34xVkdWtJnx59jy4oux8ZYJsXPNmiq327x5czz//POZwGPX72lRkzmn8X7RPNT3scaxLJ1qst2a87ZtzvcNoKlLUw1Oy1xfX7s2nvl3fry+dm1jT4VGsrYs5YEHAAAAAABAfRN4AAAAAAAAqSfwAAAAAAAAUi+7vgfYumlbbCl1HQ/Sb2vZtgYfs2xbWZRuLW3wcWF3yraVNfYUoF6UbSuLlts3V/oUSFJW/f5eXp7u1zfl5eWxZcvur0uV9vtG+tXXayDHsnTb237xcdi2/jcAaHhpPL409ePFlh0fXCdwU/nOWL+p4d9ro/Ft3LKjXvuv98DjHzfMjrxWefU9DDRLP5z3g8aeAkCz98N5P4juKzbHNz+0bOcNP632dn/729/qb1INIO3zp3nzGojd+bjvFx/3+w9AzTT148X2zQdExDnx02mr4qexqrGnQyPYsXVTvfbvlFYAAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpV+/X8Dj5B8dGr/496nsYqHdrX18fD1/9eIOOef1nboheJQc16JiwJ6+XLm/y5wKFfXH9Z26Ilm1fiYibMsta/uCKaq/jceqpp0ZEeq+Fceqpp0b79u13u27t2rWpvV80D/X1GsixLN32tl98HLat/w0AGl4ajy9N/Xjx5Csvxw1L18UVZ3aJzx3ct7GnQyNYvOztOP6m6tvtq3oPPHILciKvpHV9DwP1Lrcwp8HHLMwpjJLckgYfF3anMKewsacA9aIwpzCyWuVH+YeWZRVWv7+3bp3u1zetW7eOvLy8Pa6DxlRfr4Ecy9Jtb/vFx2Hb+t8AoOGl8fjS1I8Xedn5EbEuClq3jLYFDf9eG42vKK9+IwmntAIAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB69X7R8rw2LnpJ85DfNi8GfPWwyG+7+wu81qW2rdvFV/ucG21bt6v3saCm7Jc0Nx/ep5Nun4y3ju0XnSOJ9780LLr36xfx3e9Ey44dq9wuPz8/jj766MjPz4+IqPR7Gnx0/vvaBupDfR9rHMvSqSbbrTlv2+Z83wCaujTV4LTMtVf79vEfh74cvdq3b+yp0EjaF+bWa/9ZSZIk9dHxhg0boqSkJEpLS6O4uLg+hgAAAAAAAFKivnMDp7QCAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AI+XWla+L3780JdaVr2uS/QEN472NW+OuJ16J9zZubeypANBMNYdjTXO4DzQt9ikA2HfrytfFxCV3x8Qld3svkjoj8Ei59eXrYurS38f6OioKdd0f0DDe27g1Js561T/bANSb5nCsaQ73gabFPgUA+259+br486vT48+vTvdeJHVG4AEAAAAAAKSewAMAAAAAAEi97MaeAHWjbFtZlG4trZN+gPTauGV7rN+0rbGnAUAztHHL9saeQp1xvKSuNKfnBQBAcyDwaCZ+OO8HjT0FoAm45L7nGnsKANDkOV4CAEDz5JRWAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqecaHs3E9Z+5IXqVHLTf/bxeutz1QCDFfnH+wDi4c1FjTwOAZuiV1RubzbUvHC+pK83peQEA0BwIPJqJwpzCKMktqZN+gPQqymsVbQtyGnsaADRDRXmtGnsKdcbxkrrSnJ4XAADNgVNaAQAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD0XLU+5tq3bxVf7nBttW7drkv0BDaNDUW58/fOfjA5FuY09FQCaqeZwrGkO94GmxT4FAPuubet2MeyTZ2R+h7qQlSRJUh8db9iwIUpKSqK0tDSKi4vrYwgAAAAAACAl6js3cEorAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1suur4yRJIiJiw4YN9TUEAAAAAACQErvygl35QV2rt8Bj7dq1ERHRvXv3+hoCAAAAAABImbVr10ZJSUmd91tvgUe7du0iImLFihX1MnEgHTZs2BDdu3ePlStXRnFxcWNPB2gE6gCgDgARagGgDgARpaWl0aNHj0x+UNfqLfBo0eKDy4OUlJQoYEAUFxerBfAxpw4A6gAQoRYA6gDwf/lBnfdbL70CAAAAAAA0IIEHAAAAAACQevUWeOTm5sY111wTubm59TUEkAJqAaAOAOoAEKEWAOoAUP91ICtJkqReegYAAAAAAGggTmkFAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9WodeLz11ltx3nnnRfv27SM/Pz+OPPLIWLhwYWZ9kiRx7bXXRteuXSMvLy8+//nPx7///e9KfWzdujUuueSS6NChQxQUFMTQoUPjzTff3P97A
zSI6urAtGnT4uSTT44OHTpEVlZWLF68uEof6gCk395qwfbt2+Oyyy6Lww47LAoKCqJr165x/vnnx9tvv12pD7UA0q261wTXXntt9O3bNwoKCqJt27bxhS98IRYsWFCpD3UA0q26OvBhY8aMiaysrLj11lsrLVcHIP2qqwUXXHBBZGVlVfo55phjKvWhFkC61eQ1wUsvvRRDhw6NkpKSKCoqimOOOSZWrFiRWV8XdaBWgcf69etj8ODB0apVq/j73/8eL774YowfPz7atGmTaXPzzTfHLbfcEr/85S/j2Wefjc6dO8eJJ54YGzduzLT59re/HdOnT4+pU6fG3Llzo6ysLE4//fTYuXNnrSYPNLya1IFNmzbF4MGD48Ybb9xjP+oApFt1tWDz5s3x/PPPxw9/+MN4/vnnY9q0afHyyy/H0KFDK/WjFkB61eQ1Qe/eveOXv/xlLFmyJObOnRu9evWKk046Kd59991MG3UA0qsmdWCXGTNmxIIFC6Jr165V1qkDkG41rQVDhgyJVatWZX7+9re/VVqvFkB61aQOvPrqq/HZz342+vbtG7NmzYr/+Z//iR/+8IfRunXrTJs6qQNJLVx22WXJZz/72T2ur6ioSDp37pzceOONmWXl5eVJSUlJ8utf/zpJkiR5//33k1atWiVTp07NtHnrrbeSFi1aJDNnzqzNdIBGUF0d+LDly5cnEZEsWrSo0nJ1ANKvNrVgl2eeeSaJiOSNN95IkkQtgLTblzpQWlqaRETy2GOPJUmiDkDa1bQOvPnmm8mBBx6YvPDCC0nPnj2TCRMmZNapA5B+NakFo0aNSoYNG7bH9WoBpFtN6sDw4cOT8847b4/r66oO1OobHg899FAMHDgwvvKVr0THjh3jqKOOirvuuiuzfvny5bF69eo46aSTMstyc3Pj2GOPjXnz5kVExMKFC2P79u2V2nTt2jX69++faQM0XdXVgZpQByD99qUWlJaWRlZWVuYTHmoBpFtt68C2bdvizjvvjJKSkjjiiCMiQh2AtKtJHaioqIiRI0fGuHHj4tBDD63ShzoA6VfT1wSzZs2Kjh07Ru/evePCCy+MNWvWZNapBZBu1dWBioqKePjhh6N3795x8sknR8eOHePTn/50zJgxI9OmrupArQKP1157Le6444741Kc+Ff/4xz9i7Nixcemll8Z9990XERGrV6+OiIhOnTpVul2nTp0y61avXh05OTnRtm3bPbYBmq7q6kBNqAOQfrWtBeXl5XH55ZfHueeeG8XFxRGhFkDa1bQO/PWvf43CwsJo3bp1TJgwIR599NHo0KFDRKgDkHY1qQM33XRTZGdnx6WXXrrbPtQBSL+a1IJTTjklpkyZEv/85z9j/Pjx8eyzz8bxxx8fW7dujQi1ANKuujqwZs2aKCsrixtvvDGGDBkSjzzySJxxxhlx5plnxuzZsyOi7upAdm0mXlFREQMHDoyf/OQnERFx1FFHxb///e+444474vzzz8+0y8rKqnS7JEmqLPuomrQBGl9N68C+UAcgPWpTC7Zv3x5f/epXo6KiIm6//fZq+1YLIB1qWgeOO+64WLx4cbz33ntx1113xdlnnx0LFiyIjh077rFvdQDSobo6sHDhwvj5z38ezz//fK2f0+oApEdNXhMMHz48075///4xcODA6NmzZzz88MNx5pln7rFvtQDSobo6UFFRERERw4YNi+985zsREXHkkUfGvHnz4te//nUce+yxe+y7tnWgVt/w6NKlS/Tr16/SskMOOSRzJfXOnTtHRFRJXNasWZP51kfnzp1j27ZtsX79+j22AZqu6upATagDkH41rQXbt2+Ps88+O5YvXx6PPvpo5tsdEWoBpF1N60BBQUEcfPDBccwxx8TEiRMjOzs7Jk6cGBHqAKRddXVgzpw5sWbNmujRo0dkZ2dHdnZ2vPHGG/G9730vevXqFRHqADQH+/I+QZcuXaJnz56xbNmyiFALIO2qqwMdOnSI7OzsarOFuqgDtQo8Bg8eHEuXLq207OWXX46ePXtGRMRBBx0UnTt3jkcffTSzftu2bTF79uz4zGc+ExERAwYMiFatWlVqs2rVqnjhhRcybYCmq7o6UBPqAKRfTWrBrrBj2bJl8dhjj0X79u0rtVcLIN329TVBkiSZ01eoA5Bu1dWBkSNHxr/+9a9YvHhx5qdr164xbty4+Mc//hER6gA0B/vymmDt2rWxcuXK6NKlS0SoBZB21dWBnJyc+H//7//ttU2d1YEaX948SZJnnnkmyc7OTm644YZk2bJlyZQpU5L8/Pzkd7/7XabNjTfemJSUlCTTpk1LlixZkpxzzjlJly5dkg0bNmTajB07NunWrVvy2GOPJc8//3xy/PHHJ0cccUSyY8eO2kwHaAQ1qQNr165NFi1alDz88MNJRCRTp05NFi1alKxatSrTRh2AdKuuFmzfvj0ZOnRo0q1bt2Tx4sXJqlWrMj9bt27N9KMWQHpVVwfKysqSK664Ipk/f37y+uuvJwsXLky+/vWvJ7m5uckLL7yQ6UcdgPSqyf8GH9WzZ89kwoQJlZapA5Bu1dWCjRs3Jt/73veSefPmJcuXL0+eeOKJZNCgQcmBBx7o/UJoJmrymmDatGlJq1atkjvvvDNZtmxZ8otf/CJp2bJlMmfOnEybuqgDtQo8kiRJ/vKXvyT9+/dPcnNzk759+yZ33nlnpfUVFRXJNddck3Tu3DnJzc1NPve5zyVLliyp1GbLli3JxRdfnLRr1y7Jy8tLTj/99GTFihW1nQrQSKqrA5MmTUoiosrPNddck2mjDkD67a0WLF++fLd1ICKSJ554ItNOLYB021sd2LJlS3LGGWckXbt2TXJycpIuXbokQ4cOTZ555plKfagDkG7V/W/wUbsLPNQBSL+91YLNmzcnJ510UnLAAQckrVq1Snr06JGMGjWqyvNcLYB0q8lrgokTJyYHH3xw0rp16+SII45IZsyYUWl9XdSBrCRJklp8OwUAAAAAAKDJqdU1PAAAAAAAAJoigQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAADYb9dee20ceeSRjT0NAADgYywrSZKksScBAAA0XVlZWXtdP2rUqPjlL38ZW7dujfbt2zfQrAAAACoTeAAAAHu1evXqzO9//OMf4+qrr46lS5dmluXl5UVJSUljTA0AACDDKa0AAIC96ty5c+anpKQksrKyqiz76CmtLrjggvjSl74UP/nJT6JTp07Rpk2buO6662LHjh0xbty4aNeuXXTr1i3uueeeSmO99dZbMXz48Gjbtm20b98+hg0bFq+//nrD3mEAACCVBB4AAEC9+Oc//xlvv/12PPnkk3HLLbfEtddeG6effnq0bds2FixYEGPHjo2xY8fGypUrIyJi8+bNcdxxx0VhYWE8+eSTMXfu3CgsLIwhQ4bEtm3bGvneAAAATZ3AAwAAqBft2rWL2267Lfr06ROjR4+OPn36xObNm+PKK6+MT33qU3HFFVdE
Tk5OPPXUUxERMXXq1GjRokXcfffdcdhhh8UhhxwSkyZNihUrVsSsWbMa984AAABNXnZjTwAAAGieDj300GjR4v8+Y9WpU6fo379/5u+WLVtG+/btY82aNRERsXDhwnjllVeiqKioUj/l5eXx6quvNsykAQCA1BJ4AAAA9aJVq1aV/s7KytrtsoqKioiIqKioiAEDBsSUKVOq9HXAAQfU30QBAIBmQeABAAA0CUcffXT88Y9/jI4dO0ZxcXFjTwcAAEgZ1/AAAACahBEjRkSHDh1i2LBhMWfOnFi+fHnMnj07vvWtb8Wbb77Z2NMDAACaOIEHAADQJOTn58eTTz4ZPXr0iDPPPDMOOeSQGD16dGzZssU3PgAAgGplJUmSNPYkAAAAAAAA9odveAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACk3v8HUEYGyV77YKkAAAAASUVORK5CYII=",
"text/plain": [
""
]
@@ -573,7 +573,7 @@
"outputs": [
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAl9UlEQVR4nO3de3RV5Z0//k8gEEJIjpAYAnJzitzECpWZqnTVajtKRwutq/WCIkjrZU3VTlupdrqsOi67rFOko1ZbrTq9UGnroF+trbd6RSq2CK1WRlFBRblowBAEApL9+4OfZ4xccjsnJzu8XmtlLbL3s5/97PPs8znn8M7ZuyhJkiQAAAAAAABSrFuhBwAAAAAAANBeAg8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2Bx27MmDEjioqKdvl56aWX9rhu0qRJ2e2HDRu22zZXXXVVts1rr70Wn/vc56KsrCyqqqriggsuiG3btmXXb926NWbMmBGHHHJIFBcXx+c///ldxrlgwYKYOHFiVFZWRmlpaYwaNSrmzJmT18cm7dIytxERDQ0N8Z3vfCeGDh0aJSUl8ZGPfCRuvfXWvD02aff+/J177rm7rPvXf/3XKCoqihkzZjRp21nm+n1PPvlkFBcXx7hx49r1WHR1uZ7riIiFCxfGv/zLv0Tfvn2jV69eccghh8Ts2bNjx44dTdpt2LAhpk2bFplMJjKZTEybNi3eeeedJm2+9rWvxWGHHRYlJSW7ncuVK1fudkz33Xdfux4XAAAAgH1dcSF2uuHdbc03yqG+ZT1bvc2kSZPitttua7Js//333+O6kpKSJr//x3/8R5x11llNlpWXl0dExI4dO+L444+P/fffPxYsWBC1tbUxffr0SJIkrrvuumyb0tLSuOCCC+J//ud/djvGsrKyOO+88+KjH/1olJWVxYIFC+Kcc86JsrKyOPvss1t9zLlQ11DXYfvKlGTatF0a5jYi4qSTToq1a9fGLbfcEsOHD49169bFe++916Zjbq8tdVs7dH+lmV5t2m7w4MExb968mDNnTpSWlkbEztDh9ttvjyFDhjRp25nmOiKirq4uzjjjjPj0pz8da9eubf3B58iO2toO3V/3yso2bZfLub7zzjvjpJNOijPPPDMeeeSR2G+//eKhhx6Kb33rW/HUU0/Fb37zmygqKoqIiKlTp8aqVauy4cTZZ58d06ZNi3vuuSfbX5IkMXPmzFi0aFH87W9/2+MxPPTQQ3HwwQdnf+/Xr1+bHgsAAAAAdipI4PHZqx/p0P09dflxrd6mpKQkampqWr3ufeXl5Xts88ADD8Tzzz8fr7/+egwcODAiImbPnh0zZsyIK6+8MioqKqKsrCxuvPHGiNj5V98f/gviiIjx48fH+PHjs78PGzYs5s+fH0888UTBAo9pf5jaYfu6+/P3tmm7NMztfffdF4899li88sor2f8EHTZsWAuPMPd+fsYdHbq/c/7f6W3a7mMf+1i88sorMX/+/DjttNMiImL+/PkxePDg+Id/+IcmbTvLXL/vnHPOialTp0b37t3jrrvuauER596aj47r0P0d8MbrbdouV3P97rvvxllnnRWTJ0+Om266Kbv8K1/5SvTv3z8mT54cv/nNb+Lkk0+OZcuWxX333RdPPfVUfPzjH4+IiJtvvjmOOOKIeOGFF2LkyJEREXHttddGRMRbb72118CjsrKy2XMQAAAAgJZzSasC+NOf/hRjx47N/idpRMRxxx0XDQ0NsXjx4jb3u2TJkli4cGEcddRRuRgmbZCrub377rtjwoQJcfXVV8cBBxwQI0aMiAsvvDC2bNmSj2F3KWeeeWaTv+a/9dZbY+bMmTnfTy6fx7fddlu8/PLLcemll+Z6mF1aLub6gQceiNra2rjwwgt3Wfe5z30uRowYEbfffntE7JzzTCaTDTsiIg4//PDIZDKxcOHCVo9/8uTJUV1dHRMnTow77ujYUBEAAACgKxJ47MHvfve76NOnT/bnS1/60h7X9enTJ6644oom21900UW7tHn00UcjImLNmjXRv3//Ju379u0bPXv2jDVr1rR6rIMGDYqSkpKYMGFCfPWrX42vfOUrrT/gfUga5vaVV16JBQsWxHPPPRd33nln/PCHP4w77rgjvvrVr7b9wPcR06ZNiwULFsTKlSvj1VdfjSeffDJOP33Xb4x0lrlevnx5XHzxxTF37twoLi7Il+5SKxdz/eKLL0ZExOjRo3e7j1GjRmXbrFmzJqqrq3dpU11d3ao579OnT1xzzTVxxx13xO9///v49Kc/HSeffHL88pe/bHEfAAAAAOzK/67twdFHH529FE3Ezvtl7GldxK7XXp81a1b2prnvO+CAA7L/fv968B+UJMlulzfniSeeiE2bNsVTTz0VF198cQwfPjxOPfXUVvezr0jD3DY2NkZRUVHMnTs3Mpmd9yq55ppr4otf/GL86Ec/yt6zgF1VVVXF8ccfHz/72c8iSZI4/vjjo6qqapd2nWGud+zYEVOnTo3LL788RowY0aJt+D+5nOskSXa7jw/PZy6e31VVVfH1r389+/uECRNiw4YNcfXVV+82sAEAAACgZQoSePzhW0cXYretUlZWFsOHD2/1uvdVVVXtsU1NTU0sWrSoybINGzbE9u3bd/mL8ZY48MADIyLikEMOibVr18Zll11WsMDjF5/9VUH22xppmNsBAwbEAQcckA07Inb+BXqSJLFq1ao46KCDWtxXLpzx8y926P7aa+bMmXHeeedFRMSPfvSj3bbpDHNdX18ff/nLX2LJkiXZ8TY2NkaSJFFcXBwPPPBAHHPMMS3qK1dq/ra0Q/fXXu2d6/eDpmXLlsWRRx65y/r//d//jTFjxkTEzjnf3Q3l33rrrTbV7g86/PDD46c//Wm7+gAAAADY1xUk8Ohb1rMQu+00jjjiiLjyyitj9erVMWDAgIjYeR35kpKSOOyww9rVd5Ik0dDQkIthtkmmJNN8oy4sV3M7ceLE+O1vfxubNm2KPn36RMTOS+9069YtBg0alJex701ppleH77M9Jk2aFNu2bYuInffVyIdczHVFRUU8++yzTZbdcMMN8fDDD8cdd9yRDTM7UvfKyg7fZ3u0d66PPfbY6NevX8yePXuXwOPuu++O5cuXZy9/dcQRR0RdXV08/fTT8U//9E8REbFo0aKoq6vbbVjSGkuWLMmeRwAAAAC0jUtatUFDQ8Mu12svLi5ucimV+vr6Xdr07t07Kioq4thjj40xY8bEtGnT4j//8z9j/fr1ceGFF8ZZZ50VFRUV2fbPP/98bNu2LdavXx/19fWxdOnSiIgYN25cROz8a+YhQ4bEqFGjIiJiwYIF8YMf/CDOP//8PBz1vqGzzO3UqVPjiiuuiDPPPDMuv/z
yePvtt2PWrFkxc+ZMl7Nqge7du8eyZcuy/96dzjDX3bp1i7Fjxzbpv7q6Onr16rXLcnavvXNdVlYWP/nJT+KUU06Js88+O84777yoqKiIP/7xjzFr1qz44he/GCeddFJE7PyW1aRJk+Kss86Kn/zkJxERcfbZZ8cJJ5wQI0eOzPb90ksvxaZNm2LNmjWxZcuW7JyPGTMmevbsGT/72c+iR48eMX78+OjWrVvcc889ce2118b3v//9XD88AAAAAPuWhF1Mnz49mTJlyh7XRcQuPyNHjsy2GTp06G7bnHPOOdk2r776anL88ccnpaWlSb9+/ZLzzjsv2bp1a5N97amf91177bXJwQcfnPTu3TupqKhIxo8fn9xwww3Jjh07cvuAdCFpmdskSZJly5Yln/nMZ5LS0tJk0KBByTe+8Y1k8+bNuXswupi9zW2SJMmUKVOS6dOnZ9t2prn+oEsvvTQ59NBD2/QY7CtyPddJkiSPP/54MmnSpCSTySQ9e/ZMxowZk/zgBz9I3nvvvSbtamtrk9NOOy0pLy9PysvLk9NOOy3ZsGFDkzZHHXXUbve5YsWKJEmS5L//+7+T0aNHJ717907Ky8uTww47LPnFL37R3ocFAAAAYJ9XlCR7uFMrAAAAAABASnQr9AAAAAAAAADaS+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6hXnq+PGxsZ48803o7y8PIqKivK1GwAAAAAAIAWSJIn6+voYOHBgdOuW++9j5C3wePPNN2Pw4MH56h4AAAAAAEih119/PQYNGpTzfvMWeJSXl0fEzoFXVFTkazcAAAAAAEAKbNy4MQYPHpzND3Itb4HH+5exqqioEHgAAAAAAAAREXm7DYablgMAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEi9vAce767fnO9dQJe0avUb8YPfzo5Vq98o9FDoYtZvXR+/WjY31m9dHxE76/Rfbv9rTut1PvpMsx1r18bG2dfEjrVrCz2UnOkKx5Sr8zRN5/uHn//tbQe5lo9z74N9vl3fEDc/8lK8Xd+Qs/7Zt3XkOaU2A+RPV6mxXeU4IrrWsdCx8h54bHlna753AV3Smto18XiPh2NN7ZpCD4UuZsPW9THvhV/Fhv//TcPmDVti8bxnY/OGLTnbRz76TLMd69ZF/TVzYse6dYUeSs50hWPK1XmapvP9w8//9raDXMvHuffBPt+ub4hbHn1Z4EHOdOQ5pTYD5E9XqbFd5Tgiutax0LFc0goAAAAAAEg9gQcAAAAAAJB6xfneQcO722JLnctaQWtt27w9IiLefe/dqGuoK/Bo6Eo2bdu02+UNm3JXrxs2bctJP11N4zt1saO2ttDDyInGd7pOXWrvuZ/G833Ttk17fW3ZU52AjtLcOdravj6sfsv22PBu+p67dD71W7Z3+D5z+fwAYKeu9v63K7xWdLU5oePkPfC4/8rHorRHab53A11OXeX6iMkRs1d+P2JloUfDvuDe7/6x0EPo8mpPObXQQ2A39sVz/5KF3yn0EGCv8n2Onv/zv+S1f8gnNRyA5nitYF/mklYAAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpl/d7eBz3naNi2Ngh+d4NdDlLXlgaC968P7457KIYN/rQQg+HLmRl3YrdXs/z+P/4dFQO65uTfdSu3LBP3hehOZXzbo8eY0YXehg5sf35ZV3mniTtPffTeL5fceSVMSxz4B7X76lOQEdp7hxtjd2dz9edMSGG15TnpH/2bS+tqe/we8Lk8vkBwE5d7f1vV3it6GpzQsfJe+BRUtYzSjO98r0b6HJ69u4RERFlxWWRKckUeDR0JX169tnt8pI+uavXJX165qSfrqbbfpnoXllZ6GHkxI79uk5dau+5n8bzvU/PPnt9bdlTnYCO0tw52tq+Pqy8tEf0LUvfc5fOp7y0R4fvM5fPDwB26mrvf7vCa0VXmxM6jktaAQAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASL28Bx6l+7lhObRFTWVNfHL7MVFTWVPoodDF9O3VL04ZOTX69uoXERG9+5bGYaccEr37luZsH/noM826V1dH+Te+Ht2rqws9lJzpCseUq/M0Tef7h5//7W0HuZaPc++DfVaVl8SXP/WRqCovyVn/7Ns68pxSmwHyp6vU2K5yHBFd61joWEVJkiT56Hjjxo2RyWSirq4uKioq8rELAAAAAAAgJfKdG7ikFQAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfw6ALWb10fv1o2N9ZvXd8p+sm33Y2zvWN/u74hbn7kpXi7viEn7XIlX/vr6OMg/8xpunWW+ess4wA6H/WBQmnJ+/y2fBbYsXZtbJx9TexYu7ZN42rv9sBOrXl98VrUsQrxeJvj3POY0hnV5vl8FHh0ARu2ro95L/wqNrQzqMhVP/m2u3G2d+xv1zfELY++3KLAoyXtciVf++vo4yD/zGm6dZb56yzjADof9YFCacn7/LZ8Ftixbl3UXzMndqxb16ZxtXd7YKfWvL54LepYhXi8zXHueUzpjGo3CTwAAAAAAAD2SuABAAAAAACknsADAAAAAABIveJCD4Dc2bRtU9Q11LVr+zT54PHmauz1W7bHhne37XV9ITQ3rrb0R9eU63OFjtHZnpPOI+DDOludYt+zt8867fks0PhOXeyorW3TdkDutOT9p9eiwujIzwbmOH98xqMzqd/yXl77F3h0IZcs/E6hh9Ch8nG85//8LznvMxc667jofJwr5ILzCIDOJl+fdWpPOTUv/QKt4/1n52VuugbzSGfyXsO7ee3fJa0AAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABSzz08upArjrwyhmUObPP2K+tWpOo+IB883lyN/bozJsTwmvI9rn9pTX1BrnvY3Lhaq1DHQf7l+lyhY3S256TzCPiwzlan2Pfs7bNOez4LVM67PXqMGd3q7bY/v8z9PyCHWvL+02tRYXTkZwNznD8+49
GZLF3+Zhzz/fz1L/DoQvr07BOZkky7tk+TDx5vrsZeXtoj+pb13Ov6QmhuXG3pj64p1+cKHaOzPSedR8CHdbY6xb5nb5912vNZoNt+meheWdnq7Xbs1/bPXcCuWvL+02tRYXTkZwNznD8+49GZlJfmN5JwSSsAAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3h0AX179YtTRk6Nvr36dYp+8m1342zv2KvKS+LLn/pIVJWX5KRdruRrfx19HOSfOU23zjJ/nWUcQOejPlAoLXmf35bPAt2rq6P8G1+P7tXVbRpXe7cHdmrN64vXoo5ViMfbHOeex5TOqLJPfs/HoiRJknx0vHHjxshkMlFXVxcVFRX52AUAAAAAAJAS+c4NfMMDAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/DYh7xd3xA3P/JSvF3f0Kb1AEDE+q3r41fL5sb6retbtBwAAADoGAKPfcjb9Q1xy6Mv7zXw2Nt6ACBiw9b1Me+FX8WGDwUbe1oOAAAAdAyBBwAAAAAAkHoCDwAAAAAAIPWKCz0AOl79lu2x4d1tu10OALTMpm2boq6hrsnvAAAAQOEIPPZB5//8L4UeAgCk3iULv1PoIQAAAAAf4JJWAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqeceHvug686YEMNryndZ/tKaevf3AIAWuuLIK2NY5sDs7yvrVrivBwAAABSQwGMfVF7aI/qW9dztcgCgZfr07BOZkkyT3wEAAIDCcUkrAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfw2IdUlZfElz/1kagqL2nTegAgom+vfnHKyKnRt1e/Fi0HAAAAOkZRkiRJPjreuHFjZDKZqKuri4qKinzsAgAAAAAASIl85wa+4QEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1ivPVcZIkERGxcePGfO0CAAAAAABIiffzgvfzg1zLW+BRW1sbERGDBw/O1y4AAAAAAICUqa2tjUwmk/N+8xZ49OvXLyIiXnvttbwMHEiHjRs3xuDBg+P111+PioqKQg8HKAB1AFAHgAi1AFAHgIi6uroYMmRINj/ItbwFHt267bw9SCaTUcCAqKioUAtgH6cOAOoAEKEWAOoA8H/5Qc77zUuvAAAAAAAAHUjgAQAAAAAApF7eAo+SkpK49NJLo6SkJF+7AFJALQDUAUAdACLUAkAdAPJfB4qSJEny0jMAAAAAAEAHcUkrAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqdfqwOONN96I008/PSorK6N3794xbty4WLx4cXZ9kiRx2WWXxcCBA6O0tDQ+9alPxd///vcmfTQ0NMT5558fVVVVUVZWFpMnT45Vq1a1/2iADtFcHZg/f34cd9xxUVVVFUVFRbF06dJd+lAHIP32Vgu2b98eF110URxyyCFRVlYWAwcOjDPOOCPefPPNJn2oBZBuzb0nuOyyy2LUqFFRVlYWffv2jc985jOxaNGiJn2oA5BuzdWBDzrnnHOiqKgofvjDHzZZrg5A+jVXC2bMmBFFRUVNfg4//PAmfagFkG4teU+wbNmymDx5cmQymSgvL4/DDz88Xnvttez6XNSBVgUeGzZsiIkTJ0aPHj3iD3/4Qzz//PMxe/bs2G+//bJtrr766rjmmmvi+uuvjz//+c9RU1MT//zP/xz19fXZNv/2b/8Wd955Z8ybNy8WLFgQmzZtihNOOCF27NjRqsEDHa8ldeDdd9+NiRMnxlVXXbXHftQBSLfmasHmzZvjmWeeiUsuuSSeeeaZmD9/frz44osxefLkJv2oBZBeLXlPMGLEiLj++uvj2WefjQULFsSwYcPi2GOPjbfeeivbRh2A9GpJHXjfXXfdFYsWLYqBAwfusk4dgHRraS2YNGlSrF69Ovvz+9//vsl6tQDSqyV14OWXX45PfOITMWrUqHj00Ufjr3/9a1xyySXRq1evbJuc1IGkFS666KLkE5/4xB7XNzY2JjU1NclVV12VXbZ169Ykk8kkP/7xj5MkSZJ33nkn6dGjRzJv3rxsmzfeeCPp1q1bct9997VmOEABNFcHPmjFihVJRCRLlixpslwdgPRrTS1439NPP51ERPLqq68mSaIWQNq1pQ7U1dUlEZE89NBDSZKoA5B2La0Dq1atSg444IDkueeeS4YOHZrMmTMnu04dgPRrSS2YPn16MmXKlD2uVwsg3VpSB04++eTk9NNP3+P6XNWBVn3D4+67744JEybEl770paiuro7x48fHzTffnF2/YsWKWLNmTRx77LHZZSUlJXHUUUfFwoULIyJi8eLFsX379iZtBg4cGGPHjs22ATqv5upAS6gDkH5tqQV1dXVRVFSU/QsPtQDSrbV1YNu2bXHTTTdFJpOJQw89NCLUAUi7ltSBxsbGmDZtWsyaNSsOPvjgXfpQByD9Wvqe4NFHH43q6uoYMWJEnHXWWbFu3brsOrUA0q25OtDY2Bj33ntvjBgxIo477riorq6Oj3/843HXXXdl2+SqDrQq8HjllVfixhtvjIMOOijuv//+OPfcc+OCCy6In//85xERsWbNmoiI6N+/f5Pt+vfvn123Zs2a6NmzZ/Tt23ePbYDOq7k60BLqAKRfa2vB1q1b4+KLL46pU6dGRUVFRKgFkHYtrQO/+93vok+fPtGrV6+YM2dOPPjgg1FVVRUR6gCkXUvqwPe///0oLi6OCy64YLd9qAOQfi2pBZ/97Gdj7ty58fDDD8fs2bPjz3/+cxxzzDHR0NAQEWoBpF1zdWDdunWxadOmuOqqq2LSpEnxwAMPxBe+8IU48cQT47HHHouI3NWB4tYMvLGxMSZMmBDf+973IiJi/Pjx8fe//z1uvPHGOOOMM7LtioqKmmyXJMkuyz6sJW2AwmtpHWgLdQDSozW1YPv27XHKKadEY2Nj3HDDDc32rRZAOrS0Dhx99NGxdOnSePvtt+Pmm2+Ok046KRYtWhTV1dV77Fsdg
HRorg4sXrw4/uu//iueeeaZVj+n1QFIj5a8Jzj55JOz7ceOHRsTJkyIoUOHxr333hsnnnjiHvtWCyAdmqsDjY2NERExZcqU+PrXvx4REePGjYuFCxfGj3/84zjqqKP22Hdr60CrvuExYMCAGDNmTJNlo0ePzt5JvaamJiJil8Rl3bp12W991NTUxLZt22LDhg17bAN0Xs3VgZZQByD9WloLtm/fHieddFKsWLEiHnzwwey3OyLUAki7ltaBsrKyGD58eBx++OFxyy23RHFxcdxyyy0RoQ5A2jVXB5544olYt25dDBkyJIqLi6O4uDheffXV+OY3vxnDhg2LCHUAuoK2/D/BgAEDYujQobF8+fKIUAsg7ZqrA1VVVVFcXNxstpCLOtCqwGPixInxwgsvNFn24osvxtChQyMi4sADD4yampp48MEHs+u3bdsWjz32WBx55JEREXHYYYdFjx49mrRZvXp1PPfcc9k2QOfVXB1oCXUA0q8lteD9sGP58uXx0EMPRWVlZZP2agGkW1vfEyRJkr18hToA6dZcHZg2bVr87W9/i6VLl2Z/Bg4cGLNmzYr7778/ItQB6Ara8p6gtrY2Xn/99RgwYEBEqAWQds3VgZ49e8Y//uM/7rVNzupAi29vniTJ008/nRQXFydXXnllsnz58mTu3LlJ7969k1/+8pfZNldddVWSyWSS+fPnJ88++2xy6qmnJgMGDEg2btyYbXPuuecmgwYNSh566KHkmWeeSY455pjk0EMPTd57773WDAcogJbUgdra2mTJkiXJvffem0REMm/evGTJkiXJ6tWrs23UAUi35mrB9u3bk8mTJyeDBg1Kli5dmqxevTr709DQkO1HLYD0aq4ObNq0Kfn2t7+d/OlPf0pWrlyZLF68OPnyl7+clJSUJM8991y2H3UA0qslnw0+bOjQocmcOXOaLFMHIN2aqwX19fXJN7/5zWThwoXJihUrkkceeSQ54ogjkgMOOMD/F0IX0ZL3BPPnz0969OiR3HTTTcny5cuT6667LunevXvyxBNPZNvkog60KvBIkiS55557krFjxyYlJSXJqFGjkptuuqnJ+sbGxuTSSy9NampqkpKSkuSTn/xk8uyzzzZps2XLluS8885L+vXrl5SWliYnnHBC8tprr7V2KECBNFcHbrvttiQidvm59NJLs23UAUi/vdWCFStW7LYORETyyCOPZNupBZBue6sDW7ZsSb7whS8kAwcOTHr27JkMGDAgmTx5cvL000836UMdgHRr7rPBh+0u8FAHIP32Vgs2b96cHHvsscn++++f9OjRIxkyZEgyffr0XZ7nagGkW0veE9xyyy3J8OHDk169eiWHHnpoctdddzVZn4s6UJQkSdKKb6cAAAAAAAB0Oq26hwcAAAAAAEBnJPAAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AACAdrvsssti3LhxhR4GAACwDytKkiQp9CAAAIDOq6ioaK/rp0+fHtdff300NDREZWVlB40KAACgKYEHAACwV2vWrMn++9e//nV897vfjRdeeCG7rLS0NDKZTCGGBgAAkOWSVgAAwF7V1NRkfzKZTBQVFe2y7MOXtJoxY0Z8/vOfj+9973vRv3//2G+//eLyyy+P9957L2bNmhX9+vWLQYMGxa233tpkX2+88UacfPLJ0bdv36isrIwpU6bEypUrO/aAAQCAVBJ4AAAAefHwww/Hm2++GY8//nhcc801cdlll8UJJ5wQffv2jUWLFsW5554b5557brz++usREbF58+Y4+uijo0+fPvH444/HggULok+fPjFp0qTYtm1bgY8GAADo7AQeAABAXvTr1y+uvfbaGDlyZMycOTNGjhwZmzdvjn//93+Pgw46KL797W9Hz54948knn4yIiHnz5kW3bt3ipz/9aRxyyCExevTouO222+K1116LRx99tLAHAwAAdHrFhR4AAADQNR188MHRrdv//Y1V//79Y+zYsdnfu3fvHpWVlbFu3bqIiFi8eHG89NJLUV5e3qSfrVu3xssvv9wxgwYAAFJL4AEAAORFjx49mvxeVFS022WNjY0REdHY2BiHHXZYzJ07d5e+9t9///wNFAAA6BIEHgAAQKfwsY99LH79619HdXV1VFRUFHo4AABAyriHBwAA0CmcdtppUVVVFVOmTIknnngiVqxYEY899lh87Wtfi1WrVhV6eAAAQCcn8AAAADqF3r17x+OPPx5DhgyJE088MUaPHh0zZ86MLVu2+MYHAADQrKIkSZJCDwIAAAAAAKA9fMMDAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOr9fw+gShyFf/1LAAAAAElFTkSuQmCC\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAABjwAAADyCAYAAAD5q2z1AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAl9UlEQVR4nO3de3RV5Z0//k8gEEJIjpAYAnJzitzECpWZqnTVajtKRwutq/WCIkjrZU3VTlupdrqsOi67rFOko1ZbrTq9UGnroF+trbd6RSq2CK1WRlFBRblowBAEApL9+4OfZ4xccjsnJzu8XmtlLbL3s5/97PPs8znn8M7ZuyhJkiQAAAAAAABSrFuhBwAAAAAAANBeAg8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2Bx27MmDEjioqKdvl56aWX9rhu0qRJ2e2HDRu22zZXXXVVts1rr70Wn/vc56KsrCyqqqriggsuiG3btmXXb926NWbMmBGHHHJIFBcXx+c///ldxrlgwYKYOHFiVFZWRmlpaYwaNSrmzJmT18cm7dIytxERDQ0N8Z3vfCeGDh0aJSUl8ZGPfCRuvfXWvD02aff+/J177rm7rPvXf/3XKCoqihkzZjRp21nm+n1PPvlkFBcXx7hx49r1WHR1uZ7riIiFCxfGv/zLv0Tfvn2jV69eccghh8Ts2bNjx44dTdpt2LAhpk2bFplMJjKZTEybNi3eeeedJm2+9rWvxWGHHRYlJSW7ncuVK1fudkz33Xdfux4XAAAAgH1dcSF2uuHdbc03yqG+ZT1bvc2kSZPitttua7Js//333+O6kpKSJr//x3/8R5x11llNlpWXl0dExI4dO+L444+P/fffPxYsWBC1tbUxffr0SJIkrrvuumyb0tLSuOCCC+J//ud/djvGsrKyOO+88+KjH/1olJWVxYIFC+Kcc86JsrKyOPvss1t9zLlQ11DXYfvKlGTatF0a5jYi4qSTToq1a9fGLbfcEsOHD49169bFe++916Zjbq8tdVs7dH+lmV5t2m7w4MExb968mDNnTpSWlkbEztDh9ttvjyFDhjRp25nmOiKirq4uzjjjjPj0pz8da9eubf3B58iO2toO3V/3yso2bZfLub7zzjvjpJNOijPPPDMeeeSR2G+//eKhhx6Kb33rW/HUU0/Fb37zmygqKoqIiKlTp8aqVauy4cTZZ58d06ZNi3vuuSfbX5IkMXPmzFi0aFH87W9/2+MxPPTQQ3HwwQdnf+/Xr1+bHgsAAAAAdipI4PHZqx/p0P09dflxrd6mpKQkampqWr3ufeXl5Xts88ADD8Tzzz8fr7/+egwcODAiImbPnh0zZsyIK6+8MioqKqKsrCxuvPHGiNj5V98f/gviiIjx48fH+PHjs78PGzYs5s+fH0888UTBAo9pf5jaYfu6+/P3tmm7NMztfffdF4899li88sor2f8EHTZsWAuPMPd+fsYdHbq/c/7f6W3a7mMf+1i88sorMX/+/DjttNMiImL+/PkxePDg+Id/+IcmbTvLXL/vnHPOialTp0b37t3jrrvuauER596aj47r0P0d8MbrbdouV3P97rvvxllnnRWTJ0+Om266Kbv8K1/5SvTv3z8mT54cv/nNb+Lkk0+OZcuWxX333RdPPfVUfPzjH4+IiJtvvjmOOOKIeOGFF2LkyJEREXHttddGRMRbb72118CjsrKy2XMQAAAAgJZzSasC+NOf/hRjx47N/idpRMRxxx0XDQ0NsXjx4jb3u2TJkli4cGEcddRRuRgmbZCrub377rtjwoQJcfXVV8cBBxwQI0aMiAsvvDC2bNmSj2F3KWeeeWaTv+a/9dZbY+bMmTnfTy6fx7fddlu8/PLLcemll+Z6mF1aLub6gQceiNra2rjwwgt3Wfe5z30uRowYEbfffntE7JzzTCaTDTsiIg4//PDIZDKxcOHCVo9/8uTJUV1dHRMnTow77ujYUBEAAACgKxJ47MHvfve76NOnT/bnS1/60h7X9enTJ6644oom21900UW7tHn00UcjImLNmjXRv3//Ju379u0bPXv2jDVr1rR6rIMGDYqSkpKYMGFCfPWrX42vfOUrrT/gfUga5vaVV16JBQsWxHPPPRd33nln/PCHP4w77rgjvvrVr7b9wPcR06ZNiwULFsTKlSvj1VdfjSeffDJOP33Xb4x0lrlevnx5XHzxxTF37twoLi7Il+5SKxdz/eKLL0ZExOjRo3e7j1GjRmXbrFmzJqqrq3dpU11d3ao579OnT1xzzTVxxx13xO9///v49Kc/HSeffHL88pe/bHEfAAAAAOzK/67twdFHH529FE3Ezvtl7GldxK7XXp81a1b2prnvO+CAA7L/fv968B+UJMlulzfniSeeiE2bNsVTTz0VF198cQwfPjxOPfXUVvezr0jD3DY2NkZRUVHMnTs3Mpmd9yq55ppr4otf/GL86Ec/yt6zgF1VVVXF8ccfHz/72c8iSZI4/vjjo6qqapd2nWGud+zYEVOnTo3LL788RowY0aJt+D+5nOskSXa7jw/PZy6e31VVVfH1r389+/uECRNiw4YNcfXVV+82sAEAAACgZQoSePzhW0cXYretUlZWFsOHD2/1uvdVVVXtsU1NTU0sWrSoybINGzbE9u3bd/mL8ZY48MADIyLikEMOibVr18Zll11WsMDjF5/9VUH22xppmNsBAwbEAQcckA07Inb+BXqSJLFq1ao46KCDWtxXLpzx8y926P7aa+bMmXHeeedFRMSPfvSj3bbpDHNdX18ff/nLX2LJkiXZ8TY2NkaSJFFcXBwPPPBAHHPMMS3qK1dq/ra0Q/fXXu2d6/eDpmXLlsWRRx65y/r//d//jTFjxkTEzjnf3Q3l33rrrTbV7g86/PDD46c//Wm7+gAAAADY1xUk8Ohb1rMQu+00jjjiiLjyyitj9erVMWDAgIjYeR35kpKSOOyww9rVd5Ik0dDQkIthtkmmJNN8oy4sV3M7ceLE+O1vfxubNm2KPn36RMTOS+9069YtBg0alJex701ppleH77M9Jk2aFNu2bYuInffVyIdczHVFRUU8++yzTZbdcMMN8fDDD8cdd9yRDTM7UvfKyg7fZ3u0d66PPfbY6NevX8yePXuXwOPuu++O5cuXZy9/dcQRR0RdXV08/fTT8U//9E8REbFo0aKoq6vbbVjSGkuWLMmeRwAAAAC0jUtatUFDQ8Mu12svLi5ucimV+vr6Xdr07t07Kioq4thjj40xY8bEtGnT4j//8z9j/fr1ceGFF8ZZZ50VFRUV2fbPP/98bNu2LdavXx/19fWxdOnSiIgYN25cROz8a+YhQ4bEqFGjIiJiwYIF8YMf/CDOP//8PBz1vqGzzO3UqVPjiiuuiDPPPDMuv/z
yePvtt2PWrFkxc+ZMl7Nqge7du8eyZcuy/96dzjDX3bp1i7Fjxzbpv7q6Onr16rXLcnavvXNdVlYWP/nJT+KUU06Js88+O84777yoqKiIP/7xjzFr1qz44he/GCeddFJE7PyW1aRJk+Kss86Kn/zkJxERcfbZZ8cJJ5wQI0eOzPb90ksvxaZNm2LNmjWxZcuW7JyPGTMmevbsGT/72c+iR48eMX78+OjWrVvcc889ce2118b3v//9XD88AAAAAPuWhF1Mnz49mTJlyh7XRcQuPyNHjsy2GTp06G7bnHPOOdk2r776anL88ccnpaWlSb9+/ZLzzjsv2bp1a5N97amf91177bXJwQcfnPTu3TupqKhIxo8fn9xwww3Jjh07cvuAdCFpmdskSZJly5Yln/nMZ5LS0tJk0KBByTe+8Y1k8+bNuXswupi9zW2SJMmUKVOS6dOnZ9t2prn+oEsvvTQ59NBD2/QY7CtyPddJkiSPP/54MmnSpCSTySQ9e/ZMxowZk/zgBz9I3nvvvSbtamtrk9NOOy0pLy9PysvLk9NOOy3ZsGFDkzZHHXXUbve5YsWKJEmS5L//+7+T0aNHJ717907Ky8uTww47LPnFL37R3ocFAAAAYJ9XlCR7uFMrAAAAAABASnQr9AAAAAAAAADaS+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6hXnq+PGxsZ48803o7y8PIqKivK1GwAAAAAAIAWSJIn6+voYOHBgdOuW++9j5C3wePPNN2Pw4MH56h4AAAAAAEih119/PQYNGpTzfvMWeJSXl0fEzoFXVFTkazcAAAAAAEAKbNy4MQYPHpzND3Itb4HH+5exqqioEHgAAAAAAAAREXm7DYablgMAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEi9vAce767fnO9dQJe0avUb8YPfzo5Vq98o9FDoYtZvXR+/WjY31m9dHxE76/Rfbv9rTut1PvpMsx1r18bG2dfEjrVrCz2UnOkKx5Sr8zRN5/uHn//tbQe5lo9z74N9vl3fEDc/8lK8Xd+Qs/7Zt3XkOaU2A+RPV6mxXeU4IrrWsdCx8h54bHlna753AV3Smto18XiPh2NN7ZpCD4UuZsPW9THvhV/Fhv//TcPmDVti8bxnY/OGLTnbRz76TLMd69ZF/TVzYse6dYUeSs50hWPK1XmapvP9w8//9raDXMvHuffBPt+ub4hbHn1Z4EHOdOQ5pTYD5E9XqbFd5Tgiutax0LFc0goAAAAAAEg9gQcAAAAAAJB6xfneQcO722JLnctaQWtt27w9IiLefe/dqGuoK/Bo6Eo2bdu02+UNm3JXrxs2bctJP11N4zt1saO2ttDDyInGd7pOXWrvuZ/G833Ttk17fW3ZU52AjtLcOdravj6sfsv22PBu+p67dD71W7Z3+D5z+fwAYKeu9v63K7xWdLU5oePkPfC4/8rHorRHab53A11OXeX6iMkRs1d+P2JloUfDvuDe7/6x0EPo8mpPObXQQ2A39sVz/5KF3yn0EGCv8n2Onv/zv+S1f8gnNRyA5nitYF/mklYAAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpl/d7eBz3naNi2Ngh+d4NdDlLXlgaC968P7457KIYN/rQQg+HLmRl3YrdXs/z+P/4dFQO65uTfdSu3LBP3hehOZXzbo8eY0YXehg5sf35ZV3mniTtPffTeL5fceSVMSxz4B7X76lOQEdp7hxtjd2dz9edMSGG15TnpH/2bS+tqe/we8Lk8vkBwE5d7f1vV3it6GpzQsfJe+BRUtYzSjO98r0b6HJ69u4RERFlxWWRKckUeDR0JX169tnt8pI+uavXJX165qSfrqbbfpnoXllZ6GHkxI79uk5dau+5n8bzvU/PPnt9bdlTnYCO0tw52tq+Pqy8tEf0LUvfc5fOp7y0R4fvM5fPDwB26mrvf7vCa0VXmxM6jktaAQAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASL28Bx6l+7lhObRFTWVNfHL7MVFTWVPoodDF9O3VL04ZOTX69uoXERG9+5bGYaccEr37luZsH/noM826V1dH+Te+Ht2rqws9lJzpCseUq/M0Tef7h5//7W0HuZaPc++DfVaVl8SXP/WRqCovyVn/7Ns68pxSmwHyp6vU2K5yHBFd61joWEVJkiT56Hjjxo2RyWSirq4uKioq8rELAAAAAAAgJfKdG7ikFQAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfw6ALWb10fv1o2N9ZvXd8p+sm33Y2zvWN/u74hbn7kpXi7viEn7XIlX/vr6OMg/8xpunWW+ess4wA6H/WBQmnJ+/y2fBbYsXZtbJx9TexYu7ZN42rv9sBOrXl98VrUsQrxeJvj3POY0hnV5vl8FHh0ARu2ro95L/wqNrQzqMhVP/m2u3G2d+xv1zfELY++3KLAoyXtciVf++vo4yD/zGm6dZb56yzjADof9YFCacn7/LZ8Ftixbl3UXzMndqxb16ZxtXd7YKfWvL54LepYhXi8zXHueUzpjGo3CTwAAAAAAAD2SuABAAAAAACknsADAAAAAABIveJCD4Dc2bRtU9Q11LVr+zT54PHmauz1W7bHhne37XV9ITQ3rrb0R9eU63OFjtHZnpPOI+DDOludYt+zt8867fks0PhOXeyorW3TdkDutOT9p9eiwujIzwbmOH98xqMzqd/yXl77F3h0IZcs/E6hh9Ch8nG85//8LznvMxc667jofJwr5ILzCIDOJl+fdWpPOTUv/QKt4/1n52VuugbzSGfyXsO7ee3fJa0AAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABSzz08upArjrwyhmUObPP2K+tWpOo+IB883lyN/bozJsTwmvI9rn9pTX1BrnvY3Lhaq1DHQf7l+lyhY3S256TzCPiwzlan2Pfs7bNOez4LVM67PXqMGd3q7bY/v8z9PyCHWvL+02tRYXTkZwNznD8+49
GZLF3+Zhzz/fz1L/DoQvr07BOZkky7tk+TDx5vrsZeXtoj+pb13Ov6QmhuXG3pj64p1+cKHaOzPSedR8CHdbY6xb5nb5912vNZoNt+meheWdnq7Xbs1/bPXcCuWvL+02tRYXTkZwNznD8+49GZlJfmN5JwSSsAAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3h0AX179YtTRk6Nvr36dYp+8m1342zv2KvKS+LLn/pIVJWX5KRdruRrfx19HOSfOU23zjJ/nWUcQOejPlAoLXmf35bPAt2rq6P8G1+P7tXVbRpXe7cHdmrN64vXoo5ViMfbHOeex5TOqLJPfs/HoiRJknx0vHHjxshkMlFXVxcVFRX52AUAAAAAAJAS+c4NfMMDAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/DYh7xd3xA3P/JSvF3f0Kb1AEDE+q3r41fL5sb6retbtBwAAADoGAKPfcjb9Q1xy6Mv7zXw2Nt6ACBiw9b1Me+FX8WGDwUbe1oOAAAAdAyBBwAAAAAAkHoCDwAAAAAAIPWKCz0AOl79lu2x4d1tu10OALTMpm2boq6hrsnvAAAAQOEIPPZB5//8L4UeAgCk3iULv1PoIQAAAAAf4JJWAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqeceHvug686YEMNryndZ/tKaevf3AIAWuuLIK2NY5sDs7yvrVrivBwAAABSQwGMfVF7aI/qW9dztcgCgZfr07BOZkkyT3wEAAIDCcUkrAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfw2IdUlZfElz/1kagqL2nTegAgom+vfnHKyKnRt1e/Fi0HAAAAOkZRkiRJPjreuHFjZDKZqKuri4qKinzsAgAAAAAASIl85wa+4QEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1BB4AAAAAAEDqCTwAAAAAAIDUE3gAAAAAAACpJ/AAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOoJPAAAAAAAgNQTeAAAAAAAAKkn8AAAAAAAAFJP4AEAAAAAAKSewAMAAAAAAEg9gQcAAAAAAJB6Ag8AAAAAACD1ivPVcZIkERGxcePGfO0CAAAAAABIiffzgvfzg1zLW+BRW1sbERGDBw/O1y4AAAAAAICUqa2tjUwmk/N+8xZ49OvXLyIiXnvttbwMHEiHjRs3xuDBg+P111+PioqKQg8HKAB1AFAHgAi1AFAHgIi6uroYMmRINj/ItbwFHt267bw9SCaTUcCAqKioUAtgH6cOAOoAEKEWAOoA8H/5Qc77zUuvAAAAAAAAHUjgAQAAAAAApF7eAo+SkpK49NJLo6SkJF+7AFJALQDUAUAdACLUAkAdAPJfB4qSJEny0jMAAAAAAEAHcUkrAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqdfqwOONN96I008/PSorK6N3794xbty4WLx4cXZ9kiRx2WWXxcCBA6O0tDQ+9alPxd///vcmfTQ0NMT5558fVVVVUVZWFpMnT45Vq1a1/2iADtFcHZg/f34cd9xxUVVVFUVFRbF06dJd+lAHIP32Vgu2b98eF110URxyyCFRVlYWAwcOjDPOOCPefPPNJn2oBZBuzb0nuOyyy2LUqFFRVlYWffv2jc985jOxaNGiJn2oA5BuzdWBDzrnnHOiqKgofvjDHzZZrg5A+jVXC2bMmBFFRUVNfg4//PAmfagFkG4teU+wbNmymDx5cmQymSgvL4/DDz88Xnvttez6XNSBVgUeGzZsiIkTJ0aPHj3iD3/4Qzz//PMxe/bs2G+//bJtrr766rjmmmvi+uuvjz//+c9RU1MT//zP/xz19fXZNv/2b/8Wd955Z8ybNy8WLFgQmzZtihNOOCF27NjRqsEDHa8ldeDdd9+NiRMnxlVXXbXHftQBSLfmasHmzZvjmWeeiUsuuSSeeeaZmD9/frz44osxefLkJv2oBZBeLXlPMGLEiLj++uvj2WefjQULFsSwYcPi2GOPjbfeeivbRh2A9GpJHXjfXXfdFYsWLYqBAwfusk4dgHRraS2YNGlSrF69Ovvz+9//vsl6tQDSqyV14OWXX45PfOITMWrUqHj00Ufjr3/9a1xyySXRq1evbJuc1IGkFS666KLkE5/4xB7XNzY2JjU1NclVV12VXbZ169Ykk8kkP/7xj5MkSZJ33nkn6dGjRzJv3rxsmzfeeCPp1q1bct9997VmOEABNFcHPmjFihVJRCRLlixpslwdgPRrTS1439NPP51ERPLqq68mSaIWQNq1pQ7U1dUlEZE89NBDSZKoA5B2La0Dq1atSg444IDkueeeS4YOHZrMmTMnu04dgPRrSS2YPn16MmXKlD2uVwsg3VpSB04++eTk9NNP3+P6XNWBVn3D4+67744JEybEl770paiuro7x48fHzTffnF2/YsWKWLNmTRx77LHZZSUlJXHUUUfFwoULIyJi8eLFsX379iZtBg4cGGPHjs22ATqv5upAS6gDkH5tqQV1dXVRVFSU/QsPtQDSrbV1YNu2bXHTTTdFJpOJQw89NCLUAUi7ltSBxsbGmDZtWsyaNSsOPvjgXfpQByD9Wvqe4NFHH43q6uoYMWJEnHXWWbFu3brsOrUA0q25OtDY2Bj33ntvjBgxIo477riorq6Oj3/843HXXXdl2+SqDrQq8HjllVfixhtvjIMOOijuv//+OPfcc+OCCy6In//85xERsWbNmoiI6N+/f5Pt+vfvn123Zs2a6NmzZ/Tt23ePbYDOq7k60BLqAKRfa2vB1q1b4+KLL46pU6dGRUVFRKgFkHYtrQO/+93vok+fPtGrV6+YM2dOPPjgg1FVVRUR6gCkXUvqwPe///0oLi6OCy64YLd9qAOQfi2pBZ/97Gdj7ty58fDDD8fs2bPjz3/+cxxzzDHR0NAQEWoBpF1zdWDdunWxadOmuOqqq2LSpEnxwAMPxBe+8IU48cQT47HHHouI3NWB4tYMvLGxMSZMmBDf+973IiJi/Pjx8fe//z1uvPHGOOOMM7LtioqKmmyXJMkuyz6sJW2AwmtpHWgLdQDSozW1YPv27XHKKadEY2Nj3HDDDc32rRZAOrS0Dhx99NGxdOnSePvtt+Pmm2+Ok046KRYtWhTV1dV77Fsdg
HRorg4sXrw4/uu//iueeeaZVj+n1QFIj5a8Jzj55JOz7ceOHRsTJkyIoUOHxr333hsnnnjiHvtWCyAdmqsDjY2NERExZcqU+PrXvx4REePGjYuFCxfGj3/84zjqqKP22Hdr60CrvuExYMCAGDNmTJNlo0ePzt5JvaamJiJil8Rl3bp12W991NTUxLZt22LDhg17bAN0Xs3VgZZQByD9WloLtm/fHieddFKsWLEiHnzwwey3OyLUAki7ltaBsrKyGD58eBx++OFxyy23RHFxcdxyyy0RoQ5A2jVXB5544olYt25dDBkyJIqLi6O4uDheffXV+OY3vxnDhg2LCHUAuoK2/D/BgAEDYujQobF8+fKIUAsg7ZqrA1VVVVFcXNxstpCLOtCqwGPixInxwgsvNFn24osvxtChQyMi4sADD4yampp48MEHs+u3bdsWjz32WBx55JEREXHYYYdFjx49mrRZvXp1PPfcc9k2QOfVXB1oCXUA0q8lteD9sGP58uXx0EMPRWVlZZP2agGkW1vfEyRJkr18hToA6dZcHZg2bVr87W9/i6VLl2Z/Bg4cGLNmzYr7778/ItQB6Ara8p6gtrY2Xn/99RgwYEBEqAWQds3VgZ49e8Y//uM/7rVNzupAi29vniTJ008/nRQXFydXXnllsnz58mTu3LlJ7969k1/+8pfZNldddVWSyWSS+fPnJ88++2xy6qmnJgMGDEg2btyYbXPuuecmgwYNSh566KHkmWeeSY455pjk0EMPTd57773WDAcogJbUgdra2mTJkiXJvffem0REMm/evGTJkiXJ6tWrs23UAUi35mrB9u3bk8mTJyeDBg1Kli5dmqxevTr709DQkO1HLYD0aq4ObNq0Kfn2t7+d/OlPf0pWrlyZLF68OPnyl7+clJSUJM8991y2H3UA0qslnw0+bOjQocmcOXOaLFMHIN2aqwX19fXJN7/5zWThwoXJihUrkkceeSQ54ogjkgMOOMD/F0IX0ZL3BPPnz0969OiR3HTTTcny5cuT6667LunevXvyxBNPZNvkog60KvBIkiS55557krFjxyYlJSXJqFGjkptuuqnJ+sbGxuTSSy9NampqkpKSkuSTn/xk8uyzzzZps2XLluS8885L+vXrl5SWliYnnHBC8tprr7V2KECBNFcHbrvttiQidvm59NJLs23UAUi/vdWCFStW7LYORETyyCOPZNupBZBue6sDW7ZsSb7whS8kAwcOTHr27JkMGDAgmTx5cvL000836UMdgHRr7rPBh+0u8FAHIP32Vgs2b96cHHvsscn++++f9OjRIxkyZEgyffr0XZ7nagGkW0veE9xyyy3J8OHDk169eiWHHnpoctdddzVZn4s6UJQkSdKKb6cAAAAAAAB0Oq26hwcAAAAAAEBnJPAAAAAAAABST+ABAAAAAACknsADAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AACAdrvsssti3LhxhR4GAACwDytKkiQp9CAAAIDOq6ioaK/rp0+fHtdff300NDREZWVlB40KAACgKYEHAACwV2vWrMn++9e//nV897vfjRdeeCG7rLS0NDKZTCGGBgAAkOWSVgAAwF7V1NRkfzKZTBQVFe2y7MOXtJoxY0Z8/vOfj+9973vRv3//2G+//eLyyy+P9957L2bNmhX9+vWLQYMGxa233tpkX2+88UacfPLJ0bdv36isrIwpU6bEypUrO/aAAQCAVBJ4AAAAefHwww/Hm2++GY8//nhcc801cdlll8UJJ5wQffv2jUWLFsW5554b5557brz++usREbF58+Y4+uijo0+fPvH444/HggULok+fPjFp0qTYtm1bgY8GAADo7AQeAABAXvTr1y+uvfbaGDlyZMycOTNGjhwZmzdvjn//93+Pgw46KL797W9Hz54948knn4yIiHnz5kW3bt3ipz/9aRxyyCExevTouO222+K1116LRx99tLAHAwAAdHrFhR4AAADQNR188MHRrdv//Y1V//79Y+zYsdnfu3fvHpWVlbFu3bqIiFi8eHG89NJLUV5e3qSfrVu3xssvv9wxgwYAAFJL4AEAAORFjx49mvxeVFS022WNjY0REdHY2BiHHXZYzJ07d5e+9t9///wNFAAA6BIEHgAAQKfwsY99LH79619HdXV1VFRUFHo4AABAyriHBwAA0CmcdtppUVVVFVOmTIknnngiVqxYEY899lh87Wtfi1WrVhV6eAAAQCcn8AAAADqF3r17x+OPPx5DhgyJE088MUaPHh0zZ86MLVu2+MYHAADQrKIkSZJCDwIAAAAAAKA9fMMDAAAAAABIPYEHAAAAAACQegIPAAAAAAAg9QQeAAAAAABA6gk8AAAAAACA1BN4AAAAAAAAqSfwAAAAAAAAUk/gAQAAAAAApJ7AAwAAAAAASD2BBwAAAAAAkHoCDwAAAAAAIPUEHgAAAAAAQOr9fw+gShyFf/1LAAAAAElFTkSuQmCC",
"text/plain": [
""
]
@@ -604,8 +604,8 @@
"metadata": {
"accelerator": "GPU",
"colab": {
- "provenance": [],
- "include_colab_link": true
+ "include_colab_link": true,
+ "provenance": []
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
@@ -1062,4 +1062,4 @@
},
"nbformat": 4,
"nbformat_minor": 0
-}
\ No newline at end of file
+}
diff --git a/version.txt b/version.txt
index fd2a01863..94ff29cc4 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-3.1.0
+3.1.1
From c657362cccc9baa74106d0413d0a4527669874e6 Mon Sep 17 00:00:00 2001
From: Hervé BREDIN
Date: Fri, 1 Dec 2023 14:21:52 +0100
Subject: [PATCH 26/57] doc: update changelog
---
CHANGELOG.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3e0a93dbe..777f41f38 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,6 @@
# Changelog
-## `develop` branch
+## Version 3.1.1 (2023-12-01)
### TL;DR
From 66dd72bb2b807aaf6d011c89678d85b51fb3b859 Mon Sep 17 00:00:00 2001
From: Hervé BREDIN
Date: Fri, 15 Dec 2023 16:10:51 +0100
Subject: [PATCH 27/57] feat(model): add `num_frames` and `receptive_field` to
segmentation models
Co-authored-by: Bilal Rahou
---
CHANGELOG.md | 6 ++
pyannote/audio/models/blocks/sincnet.py | 89 ++++++++++++++++++-
pyannote/audio/models/segmentation/PyanNet.py | 31 +++++++
.../audio/models/segmentation/SSeRiouSS.py | 78 ++++++++++++++++
pyannote/audio/models/segmentation/debug.py | 77 ++++++++++++++++
pyannote/audio/utils/frame.py | 80 +++++++++++++++++
6 files changed, 359 insertions(+), 2 deletions(-)
create mode 100644 pyannote/audio/utils/frame.py
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 777f41f38..ed30b980a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,11 @@
# Changelog
+## develop branch
+
+### New features
+
+- feat(model): add `num_frames` and `receptive_field` to segmentation models
+
## Version 3.1.1 (2023-12-01)
### TL;DR
diff --git a/pyannote/audio/models/blocks/sincnet.py b/pyannote/audio/models/blocks/sincnet.py
index 65bd6e57f..33e312ba5 100644
--- a/pyannote/audio/models/blocks/sincnet.py
+++ b/pyannote/audio/models/blocks/sincnet.py
@@ -28,6 +28,9 @@
import torch.nn as nn
import torch.nn.functional as F
from asteroid_filterbanks import Encoder, ParamSincFB
+from pyannote.core import SlidingWindow
+
+from pyannote.audio.utils.frame import conv1d_num_frames, conv1d_receptive_field_size
class SincNet(nn.Module):
@@ -35,10 +38,11 @@ def __init__(self, sample_rate: int = 16000, stride: int = 1):
super().__init__()
if sample_rate != 16000:
- raise NotImplementedError("PyanNet only supports 16kHz audio for now.")
+ raise NotImplementedError("SincNet only supports 16kHz audio for now.")
# TODO: add support for other sample rate. it should be enough to multiply
# kernel_size by (sample_rate / 16000). but this needs to be double-checked.
+ self.sample_rate = sample_rate
self.stride = stride
self.wav_norm1d = nn.InstanceNorm1d(1, affine=True)
@@ -70,6 +74,88 @@ def __init__(self, sample_rate: int = 16000, stride: int = 1):
self.pool1d.append(nn.MaxPool1d(3, stride=3, padding=0, dilation=1))
self.norm1d.append(nn.InstanceNorm1d(60, affine=True))
+ def num_frames(self, num_samples: int) -> int:
+ """Compute number of output frames for a given number of input samples
+
+ Parameters
+ ----------
+ num_samples : int
+ Number of input samples
+
+ Returns
+ -------
+ num_frames : int
+ Number of output frames
+ """
+
+ kernel_size = [251, 3, 5, 3, 5, 3]
+ stride = [self.stride, 3, 1, 3, 1, 3]
+ padding = [0, 0, 0, 0, 0, 0]
+ dilation = [1, 1, 1, 1, 1, 1]
+
+ num_frames = num_samples
+ for k, s, p, d in zip(kernel_size, stride, padding, dilation):
+ num_frames = conv1d_num_frames(
+ num_frames, kernel_size=k, stride=s, padding=p, dilation=d
+ )
+
+ return num_frames
+
+ def receptive_field_size(self, num_frames: int = 1) -> int:
+ """Compute receptive field size
+
+ Parameters
+ ----------
+ num_frames : int, optional
+ Number of frames in the output signal
+
+ Returns
+ -------
+ receptive_field_size : int
+ Receptive field size
+ """
+
+ kernel_size = [251, 3, 5, 3, 5, 3]
+ stride = [self.stride, 3, 1, 3, 1, 3]
+ padding = [0, 0, 0, 0, 0, 0]
+ dilation = [1, 1, 1, 1, 1, 1]
+
+ receptive_field_size = num_frames
+ for k, s, p, d in reversed(list(zip(kernel_size, stride, padding, dilation))):
+ receptive_field_size = conv1d_receptive_field_size(
+ num_frames=receptive_field_size,
+ kernel_size=k,
+ stride=s,
+ padding=p,
+ dilation=d,
+ )
+
+ return receptive_field_size
+
+ def receptive_field(self) -> SlidingWindow:
+ """Compute receptive field
+
+ Returns
+ -------
+ receptive field : SlidingWindow
+
+ Source
+ ------
+ https://distill.pub/2019/computing-receptive-fields/
+
+ """
+
+ # duration of the receptive field of each output frame
+ duration = self.receptive_field_size() / self.sample_rate
+
+ # step between the receptive field region of two consecutive output frames
+ step = (
+ self.receptive_field_size(num_frames=2)
+ - self.receptive_field_size(num_frames=1)
+ ) / self.sample_rate
+
+ return SlidingWindow(start=0.0, duration=duration, step=step)
+
def forward(self, waveforms: torch.Tensor) -> torch.Tensor:
"""Pass forward
@@ -83,7 +169,6 @@ def forward(self, waveforms: torch.Tensor) -> torch.Tensor:
for c, (conv1d, pool1d, norm1d) in enumerate(
zip(self.conv1d, self.pool1d, self.norm1d)
):
-
outputs = conv1d(outputs)
# https://github.com/mravanelli/SincNet/issues/4
diff --git a/pyannote/audio/models/segmentation/PyanNet.py b/pyannote/audio/models/segmentation/PyanNet.py
index 5af3734b1..2c4443e06 100644
--- a/pyannote/audio/models/segmentation/PyanNet.py
+++ b/pyannote/audio/models/segmentation/PyanNet.py
@@ -27,6 +27,7 @@
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
+from pyannote.core import SlidingWindow
from pyannote.core.utils.generators import pairwise
from pyannote.audio.core.model import Model
@@ -157,6 +158,36 @@ def build(self):
self.classifier = nn.Linear(in_features, out_features)
self.activation = self.default_activation()
+ def num_frames(self, num_samples: int) -> int:
+ """Compute number of output frames for a given number of input samples
+
+ Parameters
+ ----------
+ num_samples : int
+ Number of input samples
+
+ Returns
+ -------
+ num_frames : int
+ Number of output frames
+ """
+
+ return self.sincnet.num_frames(num_samples)
+
+ def receptive_field(self) -> SlidingWindow:
+ """Compute receptive field
+
+ Returns
+ -------
+ receptive field : SlidingWindow
+
+ Source
+ ------
+ https://distill.pub/2019/computing-receptive-fields/
+
+ """
+ return self.sincnet.receptive_field()
+
def forward(self, waveforms: torch.Tensor) -> torch.Tensor:
"""Pass forward
diff --git a/pyannote/audio/models/segmentation/SSeRiouSS.py b/pyannote/audio/models/segmentation/SSeRiouSS.py
index 7cd545177..9ba656182 100644
--- a/pyannote/audio/models/segmentation/SSeRiouSS.py
+++ b/pyannote/audio/models/segmentation/SSeRiouSS.py
@@ -27,10 +27,12 @@
import torch.nn as nn
import torch.nn.functional as F
import torchaudio
+from pyannote.core import SlidingWindow
from pyannote.core.utils.generators import pairwise
from pyannote.audio.core.model import Model
from pyannote.audio.core.task import Task
+from pyannote.audio.utils.frame import conv1d_num_frames, conv1d_receptive_field_size
from pyannote.audio.utils.params import merge_dict
@@ -191,6 +193,82 @@ def build(self):
self.classifier = nn.Linear(in_features, out_features)
self.activation = self.default_activation()
+ def num_frames(self, num_samples: int) -> int:
+ """Compute number of output frames for a given number of input samples
+
+ Parameters
+ ----------
+ num_samples : int
+ Number of input samples
+
+ Returns
+ -------
+ num_frames : int
+ Number of output frames
+ """
+
+ num_frames = num_samples
+ for conv_layer in self.wav2vec.feature_extractor.conv_layers:
+ num_frames = conv1d_num_frames(
+ num_frames,
+ kernel_size=conv_layer.kernel_size,
+ stride=conv_layer.stride,
+ padding=conv_layer.conv.padding[0],
+ dilation=conv_layer.conv.dilation[0],
+ )
+
+ return num_frames
+
+ def receptive_field_size(self, num_frames: int = 1) -> int:
+ """Compute receptive field size
+
+ Parameters
+ ----------
+ num_frames : int, optional
+ Number of frames in the output signal
+
+ Returns
+ -------
+ receptive_field_size : int
+ Receptive field size
+ """
+
+ receptive_field_size = num_frames
+ for conv_layer in reversed(self.wav2vec.feature_extractor.conv_layers):
+ receptive_field_size = conv1d_receptive_field_size(
+ num_frames=receptive_field_size,
+ kernel_size=conv_layer.kernel_size,
+ stride=conv_layer.stride,
+ padding=conv_layer.conv.padding[0],
+ dilation=conv_layer.conv.dilation[0],
+ )
+
+ return receptive_field_size
+
+ def receptive_field(self) -> SlidingWindow:
+ """Compute receptive field
+
+ Returns
+ -------
+ receptive field : SlidingWindow
+
+ Source
+ ------
+ https://distill.pub/2019/computing-receptive-fields/
+
+ """
+
+ # duration of the receptive field of each output frame
+ duration = self.receptive_field_size() / self.hparams.sample_rate
+
+ # step between the receptive field region of two consecutive output frames
+ step = (
+ self.receptive_field_size(num_frames=2)
+ - self.receptive_field_size(num_frames=1)
+ ) / self.hparams.sample_rate
+
+ return SlidingWindow(start=0.0, duration=duration, step=step)
+
def forward(self, waveforms: torch.Tensor) -> torch.Tensor:
"""Pass forward
diff --git a/pyannote/audio/models/segmentation/debug.py b/pyannote/audio/models/segmentation/debug.py
index 89512320c..a230bf768 100644
--- a/pyannote/audio/models/segmentation/debug.py
+++ b/pyannote/audio/models/segmentation/debug.py
@@ -26,6 +26,7 @@
import torch
import torch.nn as nn
from einops import rearrange
+from pyannote.core import SlidingWindow
from torchaudio.transforms import MFCC
from pyannote.audio.core.model import Model
@@ -57,6 +58,82 @@ def __init__(
bidirectional=True,
)
+ def num_frames(self, num_samples: int) -> int:
+ """Compute number of output frames for a given number of input samples
+
+ Parameters
+ ----------
+ num_samples : int
+ Number of input samples
+
+ Returns
+ -------
+ num_frames : int
+ Number of output frames
+
+ Source
+ ------
+ https://pytorch.org/docs/stable/generated/torch.stft.html#torch.stft
+
+ """
+
+ hop_length = self.mfcc.MelSpectrogram.spectrogram.hop_length
+ n_fft = self.mfcc.MelSpectrogram.spectrogram.n_fft
+ center = self.mfcc.MelSpectrogram.spectrogram.center
+ return (
+ 1 + num_samples // hop_length
+ if center
+ else 1 + (num_samples - n_fft) // hop_length
+ )
+
+ def receptive_field_size(self, num_frames: int = 1) -> int:
+ """Compute receptive field size
+
+ Parameters
+ ----------
+ num_frames : int, optional
+ Number of frames in the output signal
+
+ Returns
+ -------
+ receptive_field_size : int
+ Receptive field size
+ """
+
+ hop_length = self.mfcc.MelSpectrogram.spectrogram.hop_length
+ n_fft = self.mfcc.MelSpectrogram.spectrogram.n_fft
+ center = self.mfcc.MelSpectrogram.spectrogram.center
+
+ if center:
+ return (num_frames - 1) * hop_length
+ else:
+ return (num_frames - 1) * hop_length + n_fft
+
+ def receptive_field(self) -> SlidingWindow:
+ """Compute receptive field
+
+ Returns
+ -------
+ receptive field : SlidingWindow
+
+ Source
+ ------
+ https://distill.pub/2019/computing-receptive-fields/
+
+ """
+
+ # duration of the receptive field of each output frame
+ duration = (
+ self.mfcc.MelSpectrogram.spectrogram.win_length / self.hparams.sample_rate
+ )
+
+ # step between the receptive field region of two consecutive output frames
+ step = (
+ self.mfcc.MelSpectrogram.spectrogram.hop_length / self.hparams.sample_rate
+ )
+
+ return SlidingWindow(start=0.0, duration=duration, step=step)
+
def build(self):
# define task-dependent layers
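As a quick sanity check on the centered-STFT frame count used in `num_frames` above, here is a minimal sketch assuming torchaudio's documented MelSpectrogram defaults (n_fft=400, hop_length=200, center=True); the one-second input length is chosen purely for illustration:

num_samples = 16000                         # one second of 16 kHz audio
hop_length = 200                            # torchaudio MelSpectrogram default (win_length // 2)
num_frames = 1 + num_samples // hop_length  # centered STFT branch
print(num_frames)                           # 81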
diff --git a/pyannote/audio/utils/frame.py b/pyannote/audio/utils/frame.py
new file mode 100644
index 000000000..e3987873d
--- /dev/null
+++ b/pyannote/audio/utils/frame.py
@@ -0,0 +1,80 @@
+# MIT License
+#
+# Copyright (c) 2023 CNRS
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+
+import math
+
+
+def conv1d_num_frames(num_samples, kernel_size=5, stride=1, padding=0, dilation=1):
+ """Compute expected number of frames after 1D convolution
+
+ Parameters
+ ----------
+ num_samples : int
+ Number of samples in the input signal
+ kernel_size : int
+ Kernel size
+ stride : int
+ Stride
+ padding : int
+ Padding
+ dilation : int
+ Dilation
+
+ Returns
+ -------
+ num_frames : int
+ Number of frames in the output signal
+
+ Source
+ ------
+ https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html#torch.nn.Conv1d
+ """
+ return math.floor(
+ 1 + (num_samples + 2 * padding - dilation * (kernel_size - 1) - 1) / stride
+ )
+
+
+def conv1d_receptive_field_size(
+ num_frames=1, kernel_size=5, stride=1, padding=0, dilation=1
+):
+ """Compute receptive field size for `num_frames` frames after 1D convolution
+
+ Parameters
+ ----------
+ num_frames : int, optional
+ Number of frames in the output signal
+ kernel_size : int
+ Kernel size
+ stride : int
+ Stride
+ padding : int
+ Padding
+ dilation : int
+ Dilation
+
+ Returns
+ -------
+ receptive_field : int
+ Receptive field size
+ """
+ return (num_frames - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + 1
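To show how the two helpers in `frame.py` compose, here is a minimal sketch that chains them across the SincNet kernel/stride stack copied from the patch above; the stride of 10 is chosen for illustration (the `SincNet` constructor itself defaults to 1):

from pyannote.audio.utils.frame import conv1d_num_frames, conv1d_receptive_field_size

kernel_size = [251, 3, 5, 3, 5, 3]
stride = [10, 3, 1, 3, 1, 3]  # first entry is SincNet's configurable stride

# number of output frames for one second of 16 kHz audio
num_frames = 16000
for k, s in zip(kernel_size, stride):
    num_frames = conv1d_num_frames(num_frames, kernel_size=k, stride=s)
print(num_frames)  # 56

# receptive field of a single output frame, walking the stack in reverse
receptive_field_size = 1
for k, s in reversed(list(zip(kernel_size, stride))):
    receptive_field_size = conv1d_receptive_field_size(
        num_frames=receptive_field_size, kernel_size=k, stride=s
    )
print(receptive_field_size)  # 991 samples, i.e. ~62 ms at 16 kHz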
From 4d2d16bd6101899171013dcb014b17418ed50d88 Mon Sep 17 00:00:00 2001
From: Hervé BREDIN
Date: Wed, 20 Dec 2023 16:03:13 +0100
Subject: [PATCH 28/57] doc: update benchmark section (#1592)
---
README.md | 26 ++++++++++++++------------
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/README.md b/README.md
index a82a2488f..696fb219d 100644
--- a/README.md
+++ b/README.md
@@ -78,18 +78,20 @@ Out of the box, `pyannote.audio` speaker diarization [pipeline](https://hf.co/py
Those numbers are diarization error rates (in %):
| Benchmark | [v2.1](https://hf.co/pyannote/speaker-diarization-2.1) | [v3.1](https://hf.co/pyannote/speaker-diarization-3.1) | [Premium](https://forms.gle/eKhn7H2zTa68sMMx8) |
-| ---------------------- | ------------------------------------------------------ | ------------------------------------------------------ | ---------------------------------------------- |
-| AISHELL-4 | 14.1 | 12.3 | 11.9 |
-| AliMeeting (channel 1) | 27.4 | 24.5 | 22.5 |
-| AMI (IHM) | 18.9 | 18.8 | 16.6 |
-| AMI (SDM) | 27.1 | 22.6 | 20.9 |
-| AVA-AVD | 66.3 | 50.0 | 39.8 |
-| CALLHOME (part 2) | 31.6 | 28.4 | 22.2 |
-| DIHARD 3 (full) | 26.9 | 21.4 | 17.2 |
-| Ego4D (dev.) | 61.5 | 51.2 | 43.8 |
-| MSDWild | 32.8 | 25.4 | 19.8 |
-| REPERE (phase2) | 8.2 | 7.8 | 7.6 |
-| VoxConverse (v0.3) | 11.2 | 11.2 | 9.4 |
+| ---------------------- | ------ | ------ | --------- |
+| [AISHELL-4](https://arxiv.org/abs/2104.03603) | 14.1 | 12.2 | 11.9 |
+| [AliMeeting](https://www.openslr.org/119/) (channel 1) | 27.4 | 24.4 | 22.5 |
+| [AMI](https://groups.inf.ed.ac.uk/ami/corpus/) (IHM) | 18.9 | 18.8 | 16.6 |
+| [AMI](https://groups.inf.ed.ac.uk/ami/corpus/) (SDM) | 27.1 | 22.4 | 20.9 |
+| [AVA-AVD](https://arxiv.org/abs/2111.14448) | 66.3 | 50.0 | 39.8 |
+| [CALLHOME](https://catalog.ldc.upenn.edu/LDC2001S97) ([part 2](https://github.com/BUTSpeechFIT/CALLHOME_sublists/issues/1)) | 31.6 | 28.4 | 22.2 |
+| [DIHARD 3](https://catalog.ldc.upenn.edu/LDC2022S14) ([full](https://arxiv.org/abs/2012.01477)) | 26.9 | 21.7 | 17.2 |
+| [Earnings21](https://github.com/revdotcom/speech-datasets) | 17.0 | 9.4 | 9.0 |
+| [Ego4D](https://arxiv.org/abs/2110.07058) (dev.) | 61.5 | 51.2 | 43.8 |
+| [MSDWild](https://github.com/X-LANCE/MSDWILD) | 32.8 | 25.3 | 19.8 |
+| [RAMC](https://www.openslr.org/123/) | 22.5 | 22.2 | 18.4 |
+| [REPERE](https://www.islrn.org/resources/360-758-359-485-0/) (phase2) | 8.2 | 7.8 | 7.6 |
+| [VoxConverse](https://github.com/joonson/voxconverse) (v0.3) | 11.2 | 11.3 | 9.4 |
[Diarization error rate](http://pyannote.github.io/pyannote-metrics/reference.html#diarization) (in %)
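For reference, the metric behind these numbers is the standard diarization error rate as implemented in pyannote.metrics (this definition is background, not something introduced by the patch):

\mathrm{DER} = \frac{\text{false alarm} + \text{missed detection} + \text{speaker confusion}}{\text{total duration of annotated speech}}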
From 7bd88d5f13063c2aaa0969e9adc7760c389860f9 Mon Sep 17 00:00:00 2001
From: Hervé BREDIN
Date: Wed, 20 Dec 2023 21:26:42 +0100
Subject: [PATCH 29/57] feat(pipeline): add Waveform and SampleRate
preprocessors (#1593)
---
CHANGELOG.md | 3 ++-
pyannote/audio/utils/preprocessors.py | 18 ++++++++++++++++--
2 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ed30b980a..4bcaa93b6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,9 +1,10 @@
# Changelog
-## develop branch
+## develop
### New features
+- feat(pipeline): add `Waveform` and `SampleRate` preprocessors
- feat(model): add `num_frames` and `receptive_field` to segmentation models
## Version 3.1.1 (2023-12-01)
diff --git a/pyannote/audio/utils/preprocessors.py b/pyannote/audio/utils/preprocessors.py
index ce4685d1f..b26553bf9 100644
--- a/pyannote/audio/utils/preprocessors.py
+++ b/pyannote/audio/utils/preprocessors.py
@@ -27,12 +27,13 @@
# Hervé BREDIN - http://herve.niderb.fr
from functools import reduce
-from itertools import chain
from typing import Dict, List, Optional, Set
from pyannote.core import Annotation, Segment
from pyannote.database import ProtocolFile
+from pyannote.audio.core.io import Audio, get_torchaudio_info
+
class LowerTemporalResolution:
"""Artificially degrade temporal resolution of reference annotation
@@ -50,7 +51,6 @@ def __init__(self, resolution: float = 0.1):
self.resolution = resolution
def __call__(self, current_file: ProtocolFile) -> Annotation:
-
annotation = current_file["annotation"]
new_annotation = annotation.empty()
@@ -128,3 +128,17 @@ def __call__(self, current_file: ProtocolFile) -> Annotation:
derived[seg] = intersect_label
return derived
+
+
+class Waveform:
+ def __init__(self):
+ self._audio = Audio()
+
+ def __call__(self, file: ProtocolFile):
+ waveform, _ = self._audio(file)
+ return waveform
+
+
+class SampleRate:
+ def __call__(self, file: ProtocolFile):
+ return get_torchaudio_info(file).sample_rate
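A minimal sketch of how these two preprocessors might be plugged into a pyannote.database protocol, so that every `ProtocolFile` lazily exposes `waveform` and `sample_rate` keys; the protocol name below is a placeholder:

from pyannote.database import get_protocol

from pyannote.audio.utils.preprocessors import SampleRate, Waveform

protocol = get_protocol(
    "MyDatabase.SpeakerDiarization.MyProtocol",  # hypothetical protocol name
    preprocessors={"waveform": Waveform(), "sample_rate": SampleRate()},
)

for file in protocol.train():
    waveform = file["waveform"]        # (channel, time) tensor loaded via Audio()
    sample_rate = file["sample_rate"]  # read from file metadata via torchaudio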
From 80634c9029743780cc959e0d249cbbb085959545 Mon Sep 17 00:00:00 2001
From: Clément Pagés <55240756+clement-pages@users.noreply.github.com>
Date: Fri, 22 Dec 2023 09:16:12 +0100
Subject: [PATCH 30/57] fix: update `isort` version to 5.12.0 in
pre-commit-config (#1596)
Co-authored-by: clement-pages
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 549e46ad0..92c952bdc 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -14,7 +14,7 @@ repos:
# Sort imports
- repo: https://github.com/PyCQA/isort
- rev: 5.10.1
+ rev: 5.12.0
hooks:
- id: isort
args: ["--profile", "black"]
From e21e7bb35ff7e830f47e6cbfd7923549c2b26a9b Mon Sep 17 00:00:00 2001
From: Hervé BREDIN
Date: Mon, 8 Jan 2024 09:52:19 +0100
Subject: [PATCH 31/57] ci: deactivate FAQtory
---
.github/workflows/new_issue.yml | 29 -----------------------------
1 file changed, 29 deletions(-)
delete mode 100644 .github/workflows/new_issue.yml
diff --git a/.github/workflows/new_issue.yml b/.github/workflows/new_issue.yml
deleted file mode 100644
index b8477dc16..000000000
--- a/.github/workflows/new_issue.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-name: issues
-on:
- issues:
- types: [opened]
-jobs:
- add-comment:
- runs-on: ubuntu-latest
- permissions:
- issues: write
- steps:
- - uses: actions/checkout@v3
- with:
- ref: develop
- - name: Install FAQtory
- run: pip install FAQtory
- - name: Run Suggest
- env:
- TITLE: ${{ github.event.issue.title }}
- run: faqtory suggest "$TITLE" > suggest.md
- - name: Read suggest.md
- id: suggest
- uses: juliangruber/read-file-action@v1
- with:
- path: ./suggest.md
- - name: Suggest FAQ
- uses: peter-evans/create-or-update-comment@a35cf36e5301d70b76f316e867e7788a55a31dae
- with:
- issue-number: ${{ github.event.issue.number }}
- body: ${{ steps.suggest.outputs.content }}
From 808b170bd6be9581e6763c2bb6d85ab78f5d229f Mon Sep 17 00:00:00 2001
From: Hervé BREDIN
Date: Mon, 8 Jan 2024 16:36:24 +0100
Subject: [PATCH 32/57] feat: add MRE template
---
tutorials/MRE_template.ipynb | 2220 ++++++++++++++++++++++++++++++++++
1 file changed, 2220 insertions(+)
create mode 100644 tutorials/MRE_template.ipynb
diff --git a/tutorials/MRE_template.ipynb b/tutorials/MRE_template.ipynb
new file mode 100644
index 000000000..7a44c1449
--- /dev/null
+++ b/tutorials/MRE_template.ipynb
@@ -0,0 +1,2220 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "gpuType": "T4",
+ "authorship_tag": "ABX9TyNUZLZoYLpzG6gIYECEOuiV",
+ "include_colab_link": true
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ },
+ "accelerator": "GPU",
+ "widgets": {
+ "application/vnd.jupyter.widget-state+json": {
+ "3d0fe95350234ab599497683ae6d4ce6": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_239beda16fde4b1d9e6abfb47f036040",
+ "IPY_MODEL_2d6f5fe6f9ab4a8b853e7b023aaee35f",
+ "IPY_MODEL_771629a0fbab4b0e9ad6425b5affe380"
+ ],
+ "layout": "IPY_MODEL_8763b8e4c8104b879d4324257a810c0f"
+ }
+ },
+ "239beda16fde4b1d9e6abfb47f036040": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_22536aef3997408dbf5ea58241f01e3e",
+ "placeholder": "",
+ "style": "IPY_MODEL_2a6a5f33d7c34b6b996ab7a5b7c2f11d",
+ "value": "config.yaml: 100%"
+ }
+ },
+ "2d6f5fe6f9ab4a8b853e7b023aaee35f": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_c348b2f57c6c437fa8a9a2688bf17f4f",
+ "max": 469,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_7526127b349a4467a6d3083d92bef5ec",
+ "value": 469
+ }
+ },
+ "771629a0fbab4b0e9ad6425b5affe380": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_30efc0ed76fb496483d4bb425a930636",
+ "placeholder": "",
+ "style": "IPY_MODEL_e760e231084f4889bb489bc550b7a816",
+ "value": " 469/469 [00:00<00:00, 21.8kB/s]"
+ }
+ },
+ "8763b8e4c8104b879d4324257a810c0f": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "22536aef3997408dbf5ea58241f01e3e": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "2a6a5f33d7c34b6b996ab7a5b7c2f11d": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "c348b2f57c6c437fa8a9a2688bf17f4f": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "7526127b349a4467a6d3083d92bef5ec": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "30efc0ed76fb496483d4bb425a930636": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "e760e231084f4889bb489bc550b7a816": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "9951dce8a6c947dd9a2ed6106cb68f30": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_d68c24e62487484b83bbb06271a6981c",
+ "IPY_MODEL_467f3c9f15184784aa913cb3ae46700f",
+ "IPY_MODEL_c87055ce8cef498f999a20ff2b34e1b8"
+ ],
+ "layout": "IPY_MODEL_63d957e24276476b9bf1be150675217c"
+ }
+ },
+ "d68c24e62487484b83bbb06271a6981c": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_afe35fd2ea654d7b861046e847f82e51",
+ "placeholder": "",
+ "style": "IPY_MODEL_c5f4f5f5fbad4d14973dc21eae2358d4",
+ "value": "pytorch_model.bin: 100%"
+ }
+ },
+ "467f3c9f15184784aa913cb3ae46700f": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_b4faa4c7fd01498abe07fef6189de4d4",
+ "max": 5905440,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_35ab2642d8a5481780c43eaef7044f3c",
+ "value": 5905440
+ }
+ },
+ "c87055ce8cef498f999a20ff2b34e1b8": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_37de7e413b25451ca5288ea5f43dd2d5",
+ "placeholder": "",
+ "style": "IPY_MODEL_47012c0ff9394d77b8d857a933b4082e",
+ "value": " 5.91M/5.91M [00:00<00:00, 66.3MB/s]"
+ }
+ },
+ "63d957e24276476b9bf1be150675217c": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "afe35fd2ea654d7b861046e847f82e51": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "c5f4f5f5fbad4d14973dc21eae2358d4": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "b4faa4c7fd01498abe07fef6189de4d4": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "35ab2642d8a5481780c43eaef7044f3c": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "37de7e413b25451ca5288ea5f43dd2d5": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "47012c0ff9394d77b8d857a933b4082e": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "d3ca3e02944c49f88d2b232295b19293": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_392a6347341f4dac94cad19e1f0bf02b",
+ "IPY_MODEL_ba0102afa62c443eb9de89412301bb46",
+ "IPY_MODEL_b71a286118004001a9a65ece5ba352d2"
+ ],
+ "layout": "IPY_MODEL_29b969a7441d41059969962f338d0def"
+ }
+ },
+ "392a6347341f4dac94cad19e1f0bf02b": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_3d0be590ca374ef1a768b8d3577d3105",
+ "placeholder": "",
+ "style": "IPY_MODEL_c0ac81654e1c4904a87a2c777b520c09",
+ "value": "config.yaml: 100%"
+ }
+ },
+ "ba0102afa62c443eb9de89412301bb46": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_36f8eb5005864f779aa8185acf55e2c5",
+ "max": 399,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_5cbb5cf6a37d451c87215851ee8b0b65",
+ "value": 399
+ }
+ },
+ "b71a286118004001a9a65ece5ba352d2": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_ca3ee9d356bc464e82ee061375cc4ef2",
+ "placeholder": "",
+ "style": "IPY_MODEL_67e4c4a2768b40eca34e8add6265c40c",
+ "value": " 399/399 [00:00<00:00, 28.2kB/s]"
+ }
+ },
+ "29b969a7441d41059969962f338d0def": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "3d0be590ca374ef1a768b8d3577d3105": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "c0ac81654e1c4904a87a2c777b520c09": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "36f8eb5005864f779aa8185acf55e2c5": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "5cbb5cf6a37d451c87215851ee8b0b65": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "ca3ee9d356bc464e82ee061375cc4ef2": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "67e4c4a2768b40eca34e8add6265c40c": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "2149e37d14e94f82a77386682ba29195": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_501eb1f4c1a8458a86c917d7dac5eeb5",
+ "IPY_MODEL_7a115cf3065f4acdb0ae13577447a333",
+ "IPY_MODEL_532d713a67c94b0d8cf0d61be72c73e7"
+ ],
+ "layout": "IPY_MODEL_3f36703cc5bc4fbcbeb97ab722e573cc"
+ }
+ },
+ "501eb1f4c1a8458a86c917d7dac5eeb5": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_8088c86a3d394f9e88267196b369dfb9",
+ "placeholder": "",
+ "style": "IPY_MODEL_46c5a63bcc3a472284869bcb11407bed",
+ "value": "pytorch_model.bin: 100%"
+ }
+ },
+ "7a115cf3065f4acdb0ae13577447a333": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_754c1729ba074d518557bd53c01e3787",
+ "max": 26645418,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_bbe45325b7b74832bf6fe4a9769e0565",
+ "value": 26645418
+ }
+ },
+ "532d713a67c94b0d8cf0d61be72c73e7": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_9261da2864ce4fd9b0f05f2a2efc54a3",
+ "placeholder": "",
+ "style": "IPY_MODEL_940ed223c43d40e5b85eadf3a5ef0382",
+ "value": " 26.6M/26.6M [00:00<00:00, 172MB/s]"
+ }
+ },
+ "3f36703cc5bc4fbcbeb97ab722e573cc": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "8088c86a3d394f9e88267196b369dfb9": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "46c5a63bcc3a472284869bcb11407bed": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "754c1729ba074d518557bd53c01e3787": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "bbe45325b7b74832bf6fe4a9769e0565": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "9261da2864ce4fd9b0f05f2a2efc54a3": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "940ed223c43d40e5b85eadf3a5ef0382": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "ef97ca55c62b40f7bb233120f8efba62": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HBoxModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_2da74c40ebd44b61adaf840b9e7ce343",
+ "IPY_MODEL_c9517e2631c247b6ba083da05cfd0399",
+ "IPY_MODEL_29b6d0dc6624409c8c8a8e0565c2bc08"
+ ],
+ "layout": "IPY_MODEL_7b6f8f9b2eee485f8defe25d2a9afc4a"
+ }
+ },
+ "2da74c40ebd44b61adaf840b9e7ce343": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_fbaa859f9a8d4d5fb15667ede53a2cfb",
+ "placeholder": "",
+ "style": "IPY_MODEL_1840bc4e8cb54d3d89d5b1f199065c77",
+ "value": "config.yaml: 100%"
+ }
+ },
+ "c9517e2631c247b6ba083da05cfd0399": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "FloatProgressModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_3f955f5b7421415bb79319058d0d094d",
+ "max": 221,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_849cf4fb4ac249368fb8a5c837f8430a",
+ "value": 221
+ }
+ },
+ "29b6d0dc6624409c8c8a8e0565c2bc08": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "HTMLModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_0411eb97e38b4fa081e05f87076ba129",
+ "placeholder": "",
+ "style": "IPY_MODEL_34842bb9a73048caace624929cfc2b03",
+ "value": " 221/221 [00:00<00:00, 10.8kB/s]"
+ }
+ },
+ "7b6f8f9b2eee485f8defe25d2a9afc4a": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "fbaa859f9a8d4d5fb15667ede53a2cfb": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "1840bc4e8cb54d3d89d5b1f199065c77": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "3f955f5b7421415bb79319058d0d094d": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "849cf4fb4ac249368fb8a5c837f8430a": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "ProgressStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "0411eb97e38b4fa081e05f87076ba129": {
+ "model_module": "@jupyter-widgets/base",
+ "model_name": "LayoutModel",
+ "model_module_version": "1.2.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "34842bb9a73048caace624929cfc2b03": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_name": "DescriptionStyleModel",
+ "model_module_version": "1.5.0",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ }
+ }
+ }
+ },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "view-in-github",
+ "colab_type": "text"
+ },
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Sharing a minimal reproduction example (MRE) is a prerequisite for `pyannote.audio` contributors to be able to solve them.\n",
+ "\n",
+ "Having an MRE is very important for contributors to be able to reproduce the bug in the same way that you are experiencing it. When testing a potential fix for the issue, contributors will use the MRE to validate that the fix is working as intended.\n",
+ "\n",
+ "This notebook provides a template that should help you create such a MRE.\n",
+ "\n",
+ "Duplicate it, edit it, and share it as a link within your `pyannote.audio` bug report."
+ ],
+ "metadata": {
+ "id": "SWidE_E7ol-U"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# Setup\n",
+ "\n",
+ "Before anything, make sure to run this section."
+ ],
+ "metadata": {
+ "id": "k1vex_KZTDFm"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Specify the `pyannote.audio` version you found the issue in (including the Git commit hash if using a non-released version)."
+ ],
+ "metadata": {
+ "id": "XRNSJ2omranm"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "!pip install -qqq pyannote.audio==3.1.1"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "THKj6xjdSv9k",
+ "outputId": "719baaf8-2028-4b8e-8e46-e6a813aaf6f8"
+ },
+ "execution_count": 1,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m208.7/208.7 kB\u001b[0m \u001b[31m4.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m44.6/44.6 kB\u001b[0m \u001b[31m6.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m15.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m79.5/79.5 kB\u001b[0m \u001b[31m10.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.5/58.5 kB\u001b[0m \u001b[31m8.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m48.1/48.1 kB\u001b[0m \u001b[31m7.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m51.4/51.4 kB\u001b[0m \u001b[31m6.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m118.6/118.6 kB\u001b[0m \u001b[31m16.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m630.6/630.6 kB\u001b[0m \u001b[31m21.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m101.7/101.7 kB\u001b[0m \u001b[31m14.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m47.9/47.9 kB\u001b[0m \u001b[31m7.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m806.1/806.1 kB\u001b[0m \u001b[31m25.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m777.7/777.7 kB\u001b[0m \u001b[31m29.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m117.0/117.0 kB\u001b[0m \u001b[31m18.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
+ " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m413.4/413.4 kB\u001b[0m \u001b[31m29.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m39.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m59.6/59.6 kB\u001b[0m \u001b[31m9.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m233.4/233.4 kB\u001b[0m \u001b[31m27.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m116.4/116.4 kB\u001b[0m \u001b[31m14.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m78.6/78.6 kB\u001b[0m \u001b[31m12.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m526.7/526.7 kB\u001b[0m \u001b[31m45.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25h Building wheel for antlr4-python3-runtime (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
+ " Building wheel for docopt (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
+ " Building wheel for julius (setup.py) ... \u001b[?25l\u001b[?25hdone\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Declare your [Huggingface token](https://huggingface.co/settings/tokens) as `HF_TOKEN` secret by clicking on the 🔑 icon on the left:\n",
+ "\n",
+ "* **Name**: `HF_TOKEN` \n",
+ "* **Value**: your Huggingface token (e.g. `hf_ABCdzRFTkglhlcalBAPGHSQvxLmQs`)"
+ ],
+ "metadata": {
+ "id": "ZLhE8e7iTpTu"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Check that you can load the pretrained pipeline."
+ ],
+ "metadata": {
+ "id": "Mogy5_qYUoXs"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# access your HF token\n",
+ "from google.colab import userdata\n",
+ "hf_token = userdata.get('HF_TOKEN')\n",
+ "\n",
+ "# load the pretrained pipeline\n",
+ "from pyannote.audio import Pipeline\n",
+ "pipeline = Pipeline.from_pretrained(\n",
+ " \"pyannote/speaker-diarization-3.1\",\n",
+ " use_auth_token=hf_token)"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 307,
+ "referenced_widgets": [
+ "3d0fe95350234ab599497683ae6d4ce6",
+ "239beda16fde4b1d9e6abfb47f036040",
+ "2d6f5fe6f9ab4a8b853e7b023aaee35f",
+ "771629a0fbab4b0e9ad6425b5affe380",
+ "8763b8e4c8104b879d4324257a810c0f",
+ "22536aef3997408dbf5ea58241f01e3e",
+ "2a6a5f33d7c34b6b996ab7a5b7c2f11d",
+ "c348b2f57c6c437fa8a9a2688bf17f4f",
+ "7526127b349a4467a6d3083d92bef5ec",
+ "30efc0ed76fb496483d4bb425a930636",
+ "e760e231084f4889bb489bc550b7a816",
+ "9951dce8a6c947dd9a2ed6106cb68f30",
+ "d68c24e62487484b83bbb06271a6981c",
+ "467f3c9f15184784aa913cb3ae46700f",
+ "c87055ce8cef498f999a20ff2b34e1b8",
+ "63d957e24276476b9bf1be150675217c",
+ "afe35fd2ea654d7b861046e847f82e51",
+ "c5f4f5f5fbad4d14973dc21eae2358d4",
+ "b4faa4c7fd01498abe07fef6189de4d4",
+ "35ab2642d8a5481780c43eaef7044f3c",
+ "37de7e413b25451ca5288ea5f43dd2d5",
+ "47012c0ff9394d77b8d857a933b4082e",
+ "d3ca3e02944c49f88d2b232295b19293",
+ "392a6347341f4dac94cad19e1f0bf02b",
+ "ba0102afa62c443eb9de89412301bb46",
+ "b71a286118004001a9a65ece5ba352d2",
+ "29b969a7441d41059969962f338d0def",
+ "3d0be590ca374ef1a768b8d3577d3105",
+ "c0ac81654e1c4904a87a2c777b520c09",
+ "36f8eb5005864f779aa8185acf55e2c5",
+ "5cbb5cf6a37d451c87215851ee8b0b65",
+ "ca3ee9d356bc464e82ee061375cc4ef2",
+ "67e4c4a2768b40eca34e8add6265c40c",
+ "2149e37d14e94f82a77386682ba29195",
+ "501eb1f4c1a8458a86c917d7dac5eeb5",
+ "7a115cf3065f4acdb0ae13577447a333",
+ "532d713a67c94b0d8cf0d61be72c73e7",
+ "3f36703cc5bc4fbcbeb97ab722e573cc",
+ "8088c86a3d394f9e88267196b369dfb9",
+ "46c5a63bcc3a472284869bcb11407bed",
+ "754c1729ba074d518557bd53c01e3787",
+ "bbe45325b7b74832bf6fe4a9769e0565",
+ "9261da2864ce4fd9b0f05f2a2efc54a3",
+ "940ed223c43d40e5b85eadf3a5ef0382",
+ "ef97ca55c62b40f7bb233120f8efba62",
+ "2da74c40ebd44b61adaf840b9e7ce343",
+ "c9517e2631c247b6ba083da05cfd0399",
+ "29b6d0dc6624409c8c8a8e0565c2bc08",
+ "7b6f8f9b2eee485f8defe25d2a9afc4a",
+ "fbaa859f9a8d4d5fb15667ede53a2cfb",
+ "1840bc4e8cb54d3d89d5b1f199065c77",
+ "3f955f5b7421415bb79319058d0d094d",
+ "849cf4fb4ac249368fb8a5c837f8430a",
+ "0411eb97e38b4fa081e05f87076ba129",
+ "34842bb9a73048caace624929cfc2b03"
+ ]
+ },
+ "id": "i8rs3XylTTys",
+ "outputId": "b1b7445e-6f29-4ce0-c111-f5ffcb488c47"
+ },
+ "execution_count": 3,
+ "outputs": [
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "config.yaml: 0%| | 0.00/469 [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "3d0fe95350234ab599497683ae6d4ce6"
+ }
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "stream",
+ "name": "stderr",
+ "text": [
+ "/usr/local/lib/python3.10/dist-packages/pyannote/audio/pipelines/speaker_verification.py:43: UserWarning: torchaudio._backend.get_audio_backend has been deprecated. With dispatcher enabled, this function is no-op. You can remove the function call.\n",
+ " backend = torchaudio.get_audio_backend()\n",
+ "/usr/local/lib/python3.10/dist-packages/pyannote/audio/pipelines/speaker_verification.py:53: UserWarning: torchaudio._backend.set_audio_backend has been deprecated. With dispatcher enabled, this function is no-op. You can remove the function call.\n",
+ " torchaudio.set_audio_backend(backend)\n",
+ "/usr/local/lib/python3.10/dist-packages/pyannote/audio/tasks/segmentation/mixins.py:37: UserWarning: `torchaudio.backend.common.AudioMetaData` has been moved to `torchaudio.AudioMetaData`. Please update the import path.\n",
+ " from torchaudio.backend.common import AudioMetaData\n"
+ ]
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "pytorch_model.bin: 0%| | 0.00/5.91M [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "9951dce8a6c947dd9a2ed6106cb68f30"
+ }
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "config.yaml: 0%| | 0.00/399 [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "d3ca3e02944c49f88d2b232295b19293"
+ }
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "pytorch_model.bin: 0%| | 0.00/26.6M [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "2149e37d14e94f82a77386682ba29195"
+ }
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "config.yaml: 0%| | 0.00/221 [00:00, ?B/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "ef97ca55c62b40f7bb233120f8efba62"
+ }
+ },
+ "metadata": {}
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+        "Check that a GPU is available and send the pipeline to it."
+ ],
+ "metadata": {
+ "id": "SSGSKkLdXheL"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import torch\n",
+ "if torch.cuda.is_available():\n",
+ " gpu = torch.device(\"cuda\")\n",
+ " pipeline.to(gpu)\n",
+ "else:\n",
+ " print(\"Please switch to (free) T4 GPU runtime.\")"
+ ],
+ "metadata": {
+ "id": "vxMDKwA0XcKi"
+ },
+ "execution_count": 4,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+        "Download a sample audio file (make sure the download link is public, or your bug report will not be reproducible by anyone)."
+ ],
+ "metadata": {
+ "id": "ISngZk15Uzhb"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "!wget https://github.com/pyannote/pyannote-audio/raw/develop/tutorials/assets/sample.wav"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "jYfZIys7Te0z",
+ "outputId": "ba64b747-c973-4274-feb4-3b1b4293e529"
+ },
+ "execution_count": 5,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "--2024-01-08 15:31:41-- https://github.com/pyannote/pyannote-audio/raw/develop/tutorials/assets/sample.wav\n",
+ "Resolving github.com (github.com)... 140.82.113.4\n",
+ "Connecting to github.com (github.com)|140.82.113.4|:443... connected.\n",
+ "HTTP request sent, awaiting response... 302 Found\n",
+ "Location: https://raw.githubusercontent.com/pyannote/pyannote-audio/develop/tutorials/assets/sample.wav [following]\n",
+ "--2024-01-08 15:31:41-- https://raw.githubusercontent.com/pyannote/pyannote-audio/develop/tutorials/assets/sample.wav\n",
+ "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...\n",
+ "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.\n",
+ "HTTP request sent, awaiting response... 200 OK\n",
+ "Length: 960104 (938K) [audio/wav]\n",
+ "Saving to: ‘sample.wav’\n",
+ "\n",
+ "sample.wav 100%[===================>] 937.60K --.-KB/s in 0.04s \n",
+ "\n",
+ "2024-01-08 15:31:42 (25.7 MB/s) - ‘sample.wav’ saved [960104/960104]\n",
+ "\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Apply the pretrained pipeline and visualize the output."
+ ],
+ "metadata": {
+ "id": "RH6AEClgaAgU"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "diarization = pipeline(\"sample.wav\")"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "AoCal-3UXK0z",
+ "outputId": "59428f0f-af42-4e78-fc04-abd6324c73db"
+ },
+ "execution_count": 6,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stderr",
+ "text": [
+ "/usr/local/lib/python3.10/dist-packages/pyannote/audio/utils/reproducibility.py:74: ReproducibilityWarning: TensorFloat-32 (TF32) has been disabled as it might lead to reproducibility issues and lower accuracy.\n",
+ "It can be re-enabled by calling\n",
+ " >>> import torch\n",
+ " >>> torch.backends.cuda.matmul.allow_tf32 = True\n",
+ " >>> torch.backends.cudnn.allow_tf32 = True\n",
+ "See https://github.com/pyannote/pyannote-audio/issues/1370 for more details.\n",
+ "\n",
+ " warnings.warn(\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "diarization"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 259
+ },
+ "id": "iIKvKZoZXN_M",
+ "outputId": "e7dcd7e1-0512-425c-b46a-6dc18d7a67b1"
+ },
+ "execution_count": 7,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ ""
+ ],
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAABiIAAADyCAYAAADAzN2uAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAe8UlEQVR4nO3de5RV5X038O/hKnEuCjgzoCPiJV4SiMamikmMQQWV5RKlpsalkWhkhYV0qWnkjUWNsZqG9aY2qdrc8NIgxmWjJjG1uViwJqBGG0MxKY0UoykyIMhw0QGEef/wZepkCJyR2XMG5vNZa9Zi9n7Os3/n8Jxn79nfc/Yutba2tgYAAAAAAKAAfSpdAAAAAAAAsPcSRAAAAAAAAIURRAAAAAAAAIURRAAAAAAAAIURRAAAAAAAAIURRAAAAAAAAIURRAAAAAAAAIURRAAAAAAAAIURRAAAAAAAAIURRAAAAAAAAIURRAAAAAAAAIURRAAAAAAAAIURRAAAAAAAAIURRAAAAAAAAIURRAAAAAAAAIURRAAAAAAAAIXZ64OIVatWZerUqTn44IMzcODANDQ0ZPz48fn5z3+eJDnkkENSKpVSKpWy77775v3vf38eeOCBtsd//vOfb1v/9p+jjjqqw7buu+++9O3bN9OmTeuwbv78+SmVSlm7dm3bsuXLl2fUqFE5+eST09zc3NZmRz8rVqzoUE/fvn3T2NiYKVOmZM2aNWW/Ji0tLZk2bVqGDBmSqqqqTJo0KU1NTe3avPTSS5kwYULe9a53pa6uLp/97Gfz5ptvlr2N3sY466iccfYXf/EXOf744zNw4MAce+yxZffdWxlnHe1qnP3qV7/Kxz/+8TQ2NmbQoEE5+uij85WvfKXs/gEAAADYff12t4PmTc1dUUdZagfWdvoxkyZNyubNm3PPPffk0EMPTVNTUx577LGsXr26rc0XvvCFXH755Vm3bl2+/OUv58///M9z4IEH5qSTTkqSvOc978lPf/rTdv3269fxpZs9e3auueaafP3rX8+Xv/zl7LPPPn+0rqVLl+b000/PMccckwceeCCDBg1qW7dkyZLU1NS0a19XV9f27+31bN26Nb/5zW9y6aWXprm5Offff39Zr8lVV12VH/7wh3nggQdSW1ubK664Iuedd17bycytW7dmwoQJaWhoyIIFC/LKK6/kE5/4RPr3759bbrmlrG10pa1v+7/qDn2HDOn0Y4yzjnY1zra79NJL89RTT2XRokVl9Vuk1zZu7rZt7b/vgE4/xjjraFfj7Nlnn01dXV3mzJmTxsbGLFiwIFOmTEnfvn1zxRVXlLUNAAAAAHbPbgcRFz96YVfUUZbvT/xhp9qvXbs2TzzxRObPn5+PfOQjSZIRI0bkT//0T9u1q66uTkNDQxoaGnL77bdnzpw5+cEPftB24q5fv35paGjY6baWLVuWBQsW5Lvf/W7mzZuXBx98MBdeuOPXZtGiRRk/fnzGjh2be+65p8NJwLq6uuy3335/dFtvr+fAAw/M+eefn7vuumun9W3X3Nyc2bNnZ+7cuRk7dmyS5K677srRRx+dJ598MieeeGJ+/OMf59e//nV++tOfpr6+Pscee2xuuummzJgxI5///OczYEDnT6DujhWjj+3W7R34Py93qr1x1lE54yxJvvrVryZ565P+PSGIOHPWvG7b1pM3ju9Ue+Oso3LG2aWXXtruMYceemgWLlyYBx98UBABAAAA0E326kszVVVVpaqqKg8//HA2bdpU1mP69euX/v37Z/Pmzn0y+q677sqECRNSW1ubiy66KLNnz95huwULFuQjH/lIJk2alDlz5uzwk8id8eKLL+ZHP/pR2eHAs88+my1btuS0005rW3bUUUfl4IMPzsKFC5MkCxcuzKhRo1JfX9/WZvz48Vm3bl2ef/753ap3b2ScdVTOOKNzjLOO3uk4a25uzuDBg3erVgAAAADKt1cHEf369cvdd9+de+65J/vtt18++MEP5tprr/2jn7zevHlzvvjFL6a5ubnt07VJ8h//8R9tJwG3/3z6059uW79t27bcfffdueiii5IkF1xwQX72s59l2bJlHbZx7rnn5uyzz85tt92WUqm0wzoOOuigdtt6z3ve02799noGDRqUkSNH5vnnn8+MGTPKek1WrFiRAQMGdPiEcn19fdt121esWNEuhNi+fvs62jPOOipnnNE5xllH72ScLViwIPfff3+mTJlS1jYAAAAA2H27fWmmnm7SpEmZMGFCnnjiiTz55JN59NFHM2vWrHzrW9/K5MmTkyQzZszIzJkz09LSkqqqqvzN3/xNJkyY0NbHkUceme9///vt+n37Nc9/8pOfZOPGjTnrrLOSJEOHDs3pp5+eO++8MzfddFO7x51zzjl56KGH8sQTT+TDH/7wDmt+4oknUl1d3fZ7//79263fXk9LS0vmzJmT5557LtOnT+/8i0OXMc7oDsbZ7lm8eHHOOeec3HDDDRk3blwh2wAAAACgo90OIr595tyuqKNQ++yzT04//fScfvrpue666/KpT30qN9xwQ9uJu89+9rOZPHlyqqqqUl9f3+GTvQMGDMjhhx/+R/ufPXt21qxZ0+4Grdu2bcuiRYty4403pk+f//3iyde//vVcc801OfPMM/PP//zPOfnkkzv0N3LkyJ1eU/3t9Ww/yXjjjTd2OEm4Iw0NDdm8eXPWrl3bbhtNTU1t12lvaGjI008/3e5xTU1Nbeu6W8Oi57p9m++Ecfa/yhlnPdGj13y00iXsknH2vzozzn7961/n1FNPzZQpUzJz5sxd9g0AAABA19ntIKJ2YG1X1NGtjjnmmDz88MNtvw8dOnSnJ+Z2ZvXq1fne976X73znO+0uObJ169Z86EMfyo9//OOcccYZbctLpVK+8Y1vpE+fPjnrrLPywx/+sO3Gs+/UzJkzM3bs2EydOjXDhw/fadvjjz8+/fv3z2OPPZZJkyYlSZYsWZKXXnopY8aMSZKMGTMmN998c1auXJm6urokb31KuqamJsccc8xu1fpO9B0ypNu32RWMs52Ps55o/32790bsXcE42/U4e/755zN27Nhccsklufnmm3erPgAAAAA6b6++NNPq1atz/vnn59JLL83o0aNTXV2dZ555JrNmzco555xTdj9vvvlmh+uNl0ql1NfX59vf/naGDBmSj33sYx0+eXzWWWdl9uzZ7U7cbX/s1772tfTt27ft5N0pp5zStn7lypVpaWlp95ghQ4Z0uKTJdmPGjMno0aNzyy235Lbbbtvpc6mtrc1ll12Wq6++OoMHD05NTU2mT5+eMWPG5MQTT0ySjBs3Lsccc0wuvvjizJo1KytWrMjMmTMzbdq0DBw4cKf990bGWUfljLMkeeGFF7Jhw4asWLEib7zxRp577rkkb51cL/eGxb2FcdZROeNs8eLFGTt2bMaPH5+rr7667bn37ds3BxxwwE77BwAAAKBr7NVBRFVVVU444YTceuutWbp0abZs2ZLGxsZcfvnlufbaa8vu5/nnn8+wYcPaLRs4cGB
aWlpy55135txzz93hjVonTZqUiy++OK+++mqHdaVSKbfffnv69OmTCRMm5JFHHmnr48gjj+zQfuHChe1O4P6hq666KpMnT86MGTPS2Ni40+dz6623pk+fPpk0aVI2bdqU8ePH54477mhb37dv3zzyyCOZOnVqxowZk3333TeXXHJJvvCFL+y0397KONuxXY2zJPnUpz6Vxx9/vO334447LkmybNmyHHLIITvtv7cxznZsV+Psn/7pn7Jq1arMmTMnc+bMaVs+YsSIvPjiizvtGwAAAICuUWptbW2tdBEAAAAAAMDeqc+umwAAAAAAALwzgoi9zL333puqqqod/rz95rOwO4wzuoNxBgAAALB3cGmmvcz69evT1NS0w3X9+/fPiBEjurki9kbGGd3BOAMAAADYOwgiAAAAAACAwrg0EwAAAAAAUBhBBAAAAAAAUJh+5TTatm1bli9fnurq6pRKpaJrAgAAAAAAerDW1tasX78+w4cPT58+O//OQ1lBxPLly9PY2NglxQEAAAAAAHuHl19+OQcddNBO25QVRFRXV7d1WFNTs/uVAQAAAAAAe6x169alsbGxLT/YmbKCiO2XY6qpqRFEAAAAAAAASVLW7RzcrBoAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAAChMp4KIrStXFlVHB03//fvcdvO385vFy/LNeS/k1fWbCt/m1qamrPvy32ZrU1Ph2yrC9tes6b9/X+lS2IVVS1/O/CtmZtXSlytdSo+x6uUluevOK/Lb//5F5v7m3qxpWVPpkqBXWNOyxnsO9kDlvHdfXb+p246jgc7pqven/TgAvZn94J6lc0HEqlVF1dHByt83Zc7muix9cWVmz1/aPUHEypVZ/7e3dmvg0pW2v2Yrf79nBim9yWu/+58c8dA9ee13/1PpUnqM1U3L8tDgZfndit/kO0vm5jU7EegWr7Ws8Z6DPVA5791X12/qtuNooHO66v1pPw5Ab2Y/uGdxaSYAAAAAAKAwgggAAAAAAKAwgggAAAAAAKAw/TrTeFvzumxdvbqoWtppXb8hSfL6m9u6ZXtvt21tc7c9z660/TXbsHlbXtu4ucLVsDOvt7yZfZOU1u2ZY60I2/7/+H1jW0uFK4HeacPmDWne1FzpMoAybdi8oey269/Y4tgQepj1b2zp0v7sxwHojTpzTEzldSqIWPPJS7OlT/d8iaJ5yMHJudfnb3+5rlu293arL/h4t2+zK2x/za56Yk3yxLxKl8NOjHz1d/m/Sd417VNZUelieojmxkHJ/zki33z1e5UuBXql6xb8VaVLAAoy/R+fqXQJQMHsxwGAns6lmQAAAAAAgMIIIgAAAAAAgMIIIgAAAAAAgMJ06h4Rg++6M0M+8CdF1dLO6icXJ0+35Orjarr9PhFDvnNf+h9zdLdusytsf81u/fDgHDXmfZUuh5343eNPJw8nr9/+rRz64e55T/V0zYvmJc135fKh57hPBFTATSfdnENqR1a6DKBMLzYvK/ua8H//iT/J4Q3VBVcEdMYLK9Z36f1b7McB6I06c0xM5XUqiOhTW5O+Q4YUVUs7peqqJC15V7/u/9JGn/1qu+15dqXtr1nVgD7Zf98BlS6HnVi1z1tvvdaaPXOsFaFPdVXSnAzqs0+lS4FeqWpAVWoH1la6DKBMVQOqym5bPai/Y0PoYaoH9e/S/uzHAeiNOnNMTOW5NBMAAAAAAFAYQQQAAAAAAFAYQQQAAAAAAFAYQQQAAAAAAFCYTgURfQ84oKg6Oqg7qD4XDViZww6py2WnHJah1QML32bfurpUX31V+tbVFb6tImx/zeoOqq90KezC/iMOzG/PvST7jziw0qX0GEPqR+bcNSMzouHoXHDkhdl/n8GVLgl6hf33Gew9B3ugct67Q6sHdttxNNA5XfX+tB8HoDezH9yzlFpbW1t31WjdunWpra1Nc3NzampquqMuAAAAAACgh+pMbuDSTAAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEUCS5NX1m/LNeS/k1fWbKl0K0AuYc6BnWNOyJnN/c2/WtKzpUX0BAFAZ/lajM1Z3YpwIIoAkb+1oZs9fakcDdAtzDvQMr7WsyXeWzM1rXRAedGVfAABUhr/V6IzVGwQRAAAAAABADyCIAAAAAAAACtOv0gUAPcv6N7bktY2bK10GsJdb/8aWSpcAvM2GzRvSvKl5t/sAAGDv4PwQ5Vj/xptltxVEAO1M/8dnKl0CANDNrlvwV5UuAQCAHsT5Icrx5qaNZbd1aSYAAAAAAKAwgggAAAAAAKAwgggAAAAAAKAw7hEBtPP3n/iTHN5QXekygL3cCyvWu+Yo9CA3nXRzDqkduVt9vNi8zL0mAAD2Es4PUY7nfrs8Y79UXltBBNBO9aD+2X/fAZUuA9jLVQ/qX+kSgLepGlCV2oG1u90HAAB7B+eHKEf1oPLjBZdmAgAAAAAACiOIAAAAAAAACiOIAAAAAAAACiOIAAAAAAAACiOIAJIkQ6sH5rJTDsvQ6oGVLgXoBcw50DPsv8/gXHDkhdl/n8E9qi8AACrD32p0xpCq8sdJqbW1tXVXjdatW5fa2to0NzenpqZmt4oDAAAAAAD2bJ3JDXwjAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAg
AAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKIwgAgAAAAAAKEy/chq1trYmSdatW1doMQAAAAAAQM+3PS/Ynh/sTFlBxPr165MkjY2Nu1EWAAAAAACwN1m/fn1qa2t32qbUWkZcsW3btixfvjzV1dUplUpdViDQ3rp169LY2JiXX345NTU1lS4HoEczZwKUz5wJUD5zJkB5Wltbs379+gwfPjx9+uz8LhBlfSOiT58+Oeigg7qkOGDXampqHOwAlMmcCVA+cyZA+cyZALu2q29CbOdm1QAAAAAAQGEEEQAAAAAAQGEEEdCDDBw4MDfccEMGDhxY6VIAejxzJkD5zJkA5TNnAnS9sm5WDQAAAAAA8E74RgQAAAAAAFAYQQQAAAAAAFAYQQQAAAAAAFAYQQQAAAAAAFAYQQRUwL/927/l7LPPzvDhw1MqlfLwww+3W9/a2prrr78+w4YNy6BBg3Laaaflt7/9bWWKBaigXc2XkydPTqlUavdzxhlnVKZYgAr74he/mA984AOprq5OXV1dJk6cmCVLlrRr09LSkmnTpmXIkCGpqqrKpEmT0tTUVKGKASqnnDnzlFNO6XCs+elPf7pCFQPs2QQRUAEbN27M+973vtx+++07XD9r1qx89atfzde+9rU89dRT2XfffTN+/Pi0tLR0c6UAlbWr+TJJzjjjjLzyyittP/fdd183VgjQczz++OOZNm1annzyyfzkJz/Jli1bMm7cuGzcuLGtzVVXXZUf/OAHeeCBB/L4449n+fLlOe+88ypYNUBllDNnJsnll1/e7lhz1qxZFaoYYM9Wam1tba10EdCblUqlPPTQQ5k4cWKSt74NMXz48HzmM5/JX/7lXyZJmpubU19fn7vvvjsXXHBBBasFqJw/nC+Tt74RsXbt2g7flAAgWbVqVerq6vL444/n5JNPTnNzcw444IDMnTs3f/Znf5Yk+c///M8cffTRWbhwYU488cQKVwxQOX84ZyZvfSPi2GOPzd/93d9VtjiAvYBvREAPs2zZsqxYsSKnnXZa27La2tqccMIJWbhwYQUrA+iZ5s+fn7q6uhx55JGZOnVqVq9eXemSAHqE5ubmJMngwYOTJM8++2y2bNnS7jjzqKOOysEHH+w4E+j1/nDO3O7ee+/N0KFD8973vjef+9zn8vrrr1eiPIA9Xr9KFwC0t2LFiiRJfX19u+X19fVt6wB4yxlnnJHzzjsvI0eOzNKlS3PttdfmzDPPzMKFC9O3b99KlwdQMdu2bcuVV16ZD37wg3nve9+b5K3jzAEDBmS//fZr19ZxJtDb7WjOTJILL7wwI0aMyPDhw7No0aLMmDEjS5YsyYMPPljBagH2TIIIAGCP9fbL1Y0aNSqjR4/OYYcdlvnz5+fUU0+tYGUAlTVt2rQsXrw4P/vZzypdCkCP98fmzClTprT9e9SoURk2bFhOPfXULF26NIcddlh3lwmwR3NpJuhhGhoakiRNTU3tljc1NbWtA2DHDj300AwdOjQvvPBCpUsBqJgrrrgijzzySObNm5eDDjqobXlDQ0M2b96ctWvXtmvvOBPozf7YnLkjJ5xwQpI41gR4BwQR0MOMHDkyDQ0Neeyxx9qWrVu3Lk899VTGjBlTwcoAer7f//73Wb16dYYNG1bpUgC6XWtra6644oo89NBD+dd//deMHDmy3frjjz8+/fv3b3ecuWTJkrz00kuOM4FeZ1dz5o4899xzSeJYE+AdcGkmqIANGza0+wTFsmXL8txzz2Xw4ME5+OCDc+WVV+av//qvc8QRR2TkyJG57rrrMnz48EycOLFyRQNUwM7my8GDB+fGG2/MpEmT0tDQkKVLl+aaa67J4YcfnvHjx1ewaoDKmDZtWubOnZvvfe97qa6ubrvvQ21tbQYNGpTa2tpcdtllufrqqzN48ODU1NRk+vTpGTNmTE488cQKVw/QvXY1Zy5dujRz587NWWedlSFDhmTRokW56qqrcvLJJ2f06NEVrh5gz1NqbW1trXQR0NvMnz8/H/3oRzssv+SSS3L33XentbU1N9xwQ77xjW9k7dq1+dCHPpQ77rgj7373uytQLUDl7Gy+/Id/+IdMnDgxv/zlL7N27doMHz4848aNy0033ZT6+voKVAtQWaVSaYfL77rrrkyePDlJ0tLSks985jO57777smnTpowfPz533HGHSzMBvc6u5syXX345F110URYvXpyNGzemsbEx5557bmbOnJmamppurhZgzyeIAAAAAAAACuMeEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAAAAAQGEEEQAAQDuTJ0/OxIkTK10GAACwl+hX6QIAAIDuUyqVdrr+hhtuyFe+8pW0trZ2U0UAAMDeThABAAC9yCuvvNL27/vvvz/XX399lixZ0rasqqoqVVVVlSgNAADYS7k0EwAA9CINDQ1tP7W1tSmVSu2WVVVVdbg00ymnnJLp06fnyiuvzP7775/6+vp885vfzMaNG/PJT34y1dXVOfzww/Poo4+229bixYtz5plnpqqqKvX19bn44ovz6quvdvMzBgAAKk0QAQAA7NI999yToUOH5umnn8706dMzderUnH/++TnppJPy7//+7xk3blwuvvjivP7660mStWvXZuzYsTnuuOPyzDPP5F/+5V/S1NSUj33sYxV+JgAAQHcTRAAAALv0vve9LzNnzswRRxyRz33uc9lnn30ydOjQXH755TniiCNy/fXXZ/Xq1Vm0aFGS5Lbbbstxxx2XW265JUcddVSOO+643HnnnZk3b17+67/+q8LPBgAA6E7uEQEAAOzS6NGj2/7dt2/fDBkyJKNGjWpbVl9fnyRZuXJlkuRXv/pV5s2bt8P7TSxdujTvfve7C64YAADoKQQRAADALvXv37/d76VSqd2yUqmUJNm2bVuSZMOGDTn77LPzpS99qUNfw4YNK7BSAACgpxFEAAAAXe79739/vvvd7+aQQw5Jv37+7AAAgN7MPSIAAIAuN23atKxZsyYf//jH84tf/CJLly7Nj370o3zyk5/M1q1bK10eAADQjQQRAABAlxs+fHh+/vOfZ+vWrRk3blxGjRqVK6+8Mvvtt1/69PFnCAAA9Cal1tbW1koXAQAAAAAA7J18FAkAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAA
AAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACiMIAIAAAAAACjM/wM8DotLF49/uQAAAABJRU5ErkJggg==\n"
+ },
+ "metadata": {},
+ "execution_count": 7
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# MRE\n",
+ "\n",
+        "Now that things are set up, edit the following cells with the code needed to reproduce the bug you are reporting.\n"
+ ],
+ "metadata": {
+ "id": "qHxFJZDxr5O1"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from pyannote.audio import Model\n",
+ "model = Model.from_pretrained(\n",
+ " \"pyannote/speaker-diarization-3.1\",\n",
+ " use_auth_token=hf_token)"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 499
+ },
+ "id": "gVrDtBcusDbK",
+ "outputId": "25823c18-bff7-43b5-ef30-f5e8b2e43e2b"
+ },
+ "execution_count": 8,
+ "outputs": [
+ {
+ "output_type": "error",
+ "ename": "EntryNotFoundError",
+ "evalue": "404 Client Error. (Request ID: Root=1-659c1570-229842ad49cdd505022bd7b3;3b3426ec-0f8e-49a4-8783-f5740d92a8ed)\n\nEntry Not Found for url: https://huggingface.co/pyannote/speaker-diarization-3.1/resolve/main/pytorch_model.bin.",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mHTTPError\u001b[0m Traceback (most recent call last)",
+ "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py\u001b[0m in \u001b[0;36mhf_raise_for_status\u001b[0;34m(response, endpoint_name)\u001b[0m\n\u001b[1;32m 285\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 286\u001b[0;31m \u001b[0mresponse\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mraise_for_status\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 287\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mHTTPError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/requests/models.py\u001b[0m in \u001b[0;36mraise_for_status\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1020\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhttp_error_msg\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1021\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mHTTPError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mhttp_error_msg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresponse\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1022\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;31mHTTPError\u001b[0m: 404 Client Error: Not Found for url: https://huggingface.co/pyannote/speaker-diarization-3.1/resolve/main/pytorch_model.bin",
+ "\nThe above exception was the direct cause of the following exception:\n",
+ "\u001b[0;31mEntryNotFoundError\u001b[0m Traceback (most recent call last)",
+ "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mpyannote\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0maudio\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m model = Model.from_pretrained(\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0;34m\"pyannote/speaker-diarization-3.1\"\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m use_auth_token=hf_token)\n",
+ "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/pyannote/audio/core/model.py\u001b[0m in \u001b[0;36mfrom_pretrained\u001b[0;34m(cls, checkpoint, map_location, hparams_file, strict, use_auth_token, cache_dir, **kwargs)\u001b[0m\n\u001b[1;32m 622\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 623\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 624\u001b[0;31m path_for_pl = hf_hub_download(\n\u001b[0m\u001b[1;32m 625\u001b[0m \u001b[0mmodel_id\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 626\u001b[0m \u001b[0mHF_PYTORCH_WEIGHTS_NAME\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py\u001b[0m in \u001b[0;36m_inner_fn\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msmoothly_deprecate_use_auth_token\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__name__\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhas_token\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mhas_token\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 117\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 118\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 119\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 120\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_inner_fn\u001b[0m \u001b[0;31m# type: ignore\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py\u001b[0m in \u001b[0;36mhf_hub_download\u001b[0;34m(repo_id, filename, subfolder, repo_type, revision, library_name, library_version, cache_dir, local_dir, local_dir_use_symlinks, user_agent, force_download, force_filename, proxies, etag_timeout, resume_download, token, local_files_only, legacy_cache_layout, endpoint)\u001b[0m\n\u001b[1;32m 1236\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1237\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1238\u001b[0;31m metadata = get_hf_file_metadata(\n\u001b[0m\u001b[1;32m 1239\u001b[0m \u001b[0murl\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0murl\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1240\u001b[0m \u001b[0mtoken\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtoken\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py\u001b[0m in \u001b[0;36m_inner_fn\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msmoothly_deprecate_use_auth_token\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__name__\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhas_token\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mhas_token\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 117\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 118\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 119\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 120\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_inner_fn\u001b[0m \u001b[0;31m# type: ignore\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py\u001b[0m in \u001b[0;36mget_hf_file_metadata\u001b[0;34m(url, token, proxies, timeout, library_name, library_version, user_agent)\u001b[0m\n\u001b[1;32m 1629\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1630\u001b[0m \u001b[0;31m# Retrieve metadata\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1631\u001b[0;31m r = _request_wrapper(\n\u001b[0m\u001b[1;32m 1632\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"HEAD\"\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1633\u001b[0m \u001b[0murl\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0murl\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py\u001b[0m in \u001b[0;36m_request_wrapper\u001b[0;34m(method, url, follow_relative_redirects, **params)\u001b[0m\n\u001b[1;32m 383\u001b[0m \u001b[0;31m# Recursively follow relative redirects\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 384\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfollow_relative_redirects\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 385\u001b[0;31m response = _request_wrapper(\n\u001b[0m\u001b[1;32m 386\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmethod\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 387\u001b[0m \u001b[0murl\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0murl\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py\u001b[0m in \u001b[0;36m_request_wrapper\u001b[0;34m(method, url, follow_relative_redirects, **params)\u001b[0m\n\u001b[1;32m 407\u001b[0m \u001b[0;31m# Perform request and return if status_code is not in the retry list.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 408\u001b[0m \u001b[0mresponse\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget_session\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmethod\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmethod\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0murl\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0murl\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 409\u001b[0;31m \u001b[0mhf_raise_for_status\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresponse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 410\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mresponse\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 411\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py\u001b[0m in \u001b[0;36mhf_raise_for_status\u001b[0;34m(response, endpoint_name)\u001b[0m\n\u001b[1;32m 294\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0merror_code\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"EntryNotFound\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 295\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34mf\"{response.status_code} Client Error.\"\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m\"\\n\\n\"\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34mf\"Entry Not Found for url: {response.url}.\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 296\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mEntryNotFoundError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresponse\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 297\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 298\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0merror_code\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"GatedRepo\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;31mEntryNotFoundError\u001b[0m: 404 Client Error. (Request ID: Root=1-659c1570-229842ad49cdd505022bd7b3;3b3426ec-0f8e-49a4-8783-f5740d92a8ed)\n\nEntry Not Found for url: https://huggingface.co/pyannote/speaker-diarization-3.1/resolve/main/pytorch_model.bin."
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+        "# this does not work because `pyannote/speaker-diarization-3.1` is not a `Model`, it is a `Pipeline`.\n",
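+        "# load it with `Pipeline.from_pretrained(...)` instead, as done earlier in this notebook."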
+ ],
+ "metadata": {
+ "id": "e4GWU8Sbsy9u"
+ },
+ "execution_count": 9,
+ "outputs": []
+ }
+ ]
+}
\ No newline at end of file
From 42ef141298e51147f93b18f8caef2049fadcdd4c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Herv=C3=A9=20BREDIN?=
Date: Mon, 8 Jan 2024 16:53:52 +0100
Subject: [PATCH 33/57] github: add bug_report.yml template
---
.github/ISSUE_TEMPLATE/bug_report.yml | 56 +++++++++++++++++++++++++++
1 file changed, 56 insertions(+)
create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yml
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 000000000..f75e050bf
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,56 @@
+name: Bug report
+description: Report a bug in pyannote.audio
+body:
+
+- type: markdown
+ attributes:
+ value: |
+ When reporting bugs, please follow the guidelines in this template. This helps identify the problem precisely and thus enables contributors to fix it faster.
+ - Write a descriptive issue title above.
+ - The golden rule is to **always open *one* issue for *one* bug**. If you notice several bugs and want to report them, make sure to create one new issue for each of them.
+ - Search [open](https://github.com/pyannote/pyannote-audio/issues) and [closed](https://github.com/pyannote/pyannote-audio/issues?q=is%3Aissue+is%3Aclosed) issues to ensure it has not already been reported. If you don't find a relevant match or if you're unsure, don't hesitate to **open a new issue**. The bugsquad will handle it from there if it's a duplicate.
+ - Please always check if your issue is reproducible in the latest version – it may already have been fixed!
+ - If you use a custom build, please test if your issue is reproducible in official releases too.
+
+- type: textarea
+ attributes:
+ label: Tested versions
+ description: |
+      To properly fix a bug, we need to identify if the bug was recently introduced in the library, or if it was always present.
+ - Please specify the pyannote.audio version you found the issue in, including the **Git commit hash** if using a development build.
+ - If you can, **please test earlier pyannote.audio versions** and, if applicable, newer versions (development branch). Mention whether the bug is reproducible or not in the versions you tested.
+ - The aim is for us to identify whether a bug is a **regression**, i.e. an issue that didn't exist in a previous version, but was introduced later on, breaking existing functionality. For example, if a bug is reproducible in 3.2 but not in 3.0, we would like you to test intermediate 3.1 to find which version is the first one where the issue can be reproduced.
+ placeholder: |
+ - Reproducible in: 3.1, 3.2, and later
+ - Not reproducible in: 3.0
+ validations:
+ required: true
+
+- type: input
+ attributes:
+ label: System information
+ description: |
+ - Specify the OS version, and when relevant hardware information.
+ - For issues that are likely OS-specific and/or GPU-related, please specify the GPU model and architecture.
+ - **Bug reports not including the required information may be closed at the maintainers' discretion.** If in doubt, always include all the requested information; it's better to include too much information than not enough information.
+ placeholder: macOS 13.6 - pyannote.audio 3.1.1 - M1 Pro
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Issue description
+ description: |
+ Describe your issue briefly. What doesn't work, and how do you expect it to work instead?
+ You can include audio, images or videos with drag and drop, and format code blocks or logs with ``` tags.
+ validations:
+ required: true
+
+- type: input
+ attributes:
+ label: Minimal reproduction example (MRE)
+ description: |
+ Having reproducible issues is a prerequisite for contributors to be able to solve them.
+      Include a link to a minimal reproduction example, using [this Google Colab notebook](https://colab.research.google.com/github/pyannote/pyannote-audio/blob/develop/tutorials/MRE_template.ipynb) as a starting point.
+ validations:
+ required: true
From 27cd91f87ce1b31f95995d7f7ca93e20cd76c1b7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Herv=C3=A9=20BREDIN?=
Date: Mon, 8 Jan 2024 17:02:40 +0100
Subject: [PATCH 34/57] github: create config.yml
---
.github/ISSUE_TEMPLATE/config.yml | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
create mode 100644 .github/ISSUE_TEMPLATE/config.yml
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 000000000..764d5828a
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,16 @@
+blank_issues_enabled: false
+
+contact_links:
+
+ - name: Feature request
+ url: https://github.com/pyannote/pyannote-audio/discussions
+ about: Please use Github Discussions.
+
+ - name: Consulting
+ url: https://herve.niderb.fr/consulting
+ about: Using pyannote.audio in production? Make the most of it thanks to our consulting services.
+
+ - name: Premium models
+ url: https://forms.gle/eKhn7H2zTa68sMMx8
+ about: We are considering selling premium models, extensions, or services around pyannote.audio.
+
From eb2e813b36cfe611b0b8ca275b4aba27349a510d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Herv=C3=A9=20BREDIN?=
Date: Mon, 8 Jan 2024 17:04:22 +0100
Subject: [PATCH 35/57] github: update config.yml (#1607)
---
.github/ISSUE_TEMPLATE/config.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index 764d5828a..70e131dc2 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -4,7 +4,7 @@ contact_links:
- name: Feature request
url: https://github.com/pyannote/pyannote-audio/discussions
- about: Please use Github Discussions.
+ about: Suggest an idea for this project.
- name: Consulting
url: https://herve.niderb.fr/consulting
From eda0c51b134c3c0db414b87ce0e773bdd682c3e9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Herv=C3=A9=20BREDIN?=
Date: Mon, 8 Jan 2024 17:05:05 +0100
Subject: [PATCH 36/57] Delete .github/ISSUE_TEMPLATE/feature_request.md
---
.github/ISSUE_TEMPLATE/feature_request.md | 20 --------------------
1 file changed, 20 deletions(-)
delete mode 100644 .github/ISSUE_TEMPLATE/feature_request.md
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index 4ead48053..000000000
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the solution you'd like**
-A clear and concise description of what you want to happen.
-
-**Describe alternatives you've considered**
-A clear and concise description of any alternative solutions or features you've considered.
-
-**Additional context**
-Add any other context about the feature request here.
From 8f477fada9a595dc0c0e54de92e8c43cd9b1a7ea Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Herv=C3=A9=20BREDIN?=
Date: Tue, 9 Jan 2024 13:06:09 +0100
Subject: [PATCH 37/57] fix(task): fix random generators (#1594)
Before this change, each worker would select the same files, resulting in less randomness than expected.
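A minimal sketch of the idea (assuming PyTorch DataLoader workers and Lightning rank
attributes; `make_worker_rng` is an illustrative name for what `create_rng_for_worker`
does in the diff below):

    import torch
    from random import Random

    def make_worker_rng(global_seed: str, local_rank: int, global_rank: int, epoch: int) -> Random:
        # each DataLoader worker has its own id (None in the main process)
        worker_info = torch.utils.data.get_worker_info()
        worker_id = None if worker_info is None else worker_info.id

        # seeding on (seed, worker, rank, epoch) gives every worker of every
        # training process its own, epoch-dependent random stream
        rng = Random()
        rng.seed(hash((global_seed, worker_id, local_rank, global_rank, epoch)))
        return rng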
---
CHANGELOG.md | 4 +++
pyannote/audio/tasks/embedding/mixins.py | 2 +-
pyannote/audio/tasks/segmentation/mixins.py | 27 +++++++++++++--------
pyannote/audio/utils/random.py | 23 +++++++++++-------
tutorials/add_your_own_task.ipynb | 2 +-
5 files changed, 37 insertions(+), 21 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4bcaa93b6..75f4fab62 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,10 @@
- feat(pipeline): add `Waveform` and `SampleRate` preprocessors
- feat(model): add `num_frames` and `receptive_field` to segmentation models
+### Fixes
+
+- fix(task): fix random generators
+
## Version 3.1.1 (2023-12-01)
### TL;DR
diff --git a/pyannote/audio/tasks/embedding/mixins.py b/pyannote/audio/tasks/embedding/mixins.py
index da164f04e..03875256e 100644
--- a/pyannote/audio/tasks/embedding/mixins.py
+++ b/pyannote/audio/tasks/embedding/mixins.py
@@ -145,7 +145,7 @@ def train__iter__(self):
"""
# create worker-specific random number generator
- rng = create_rng_for_worker(self.model.current_epoch)
+ rng = create_rng_for_worker(self.model)
classes = list(self.specifications.classes)
diff --git a/pyannote/audio/tasks/segmentation/mixins.py b/pyannote/audio/tasks/segmentation/mixins.py
index 018e8db70..94c6da506 100644
--- a/pyannote/audio/tasks/segmentation/mixins.py
+++ b/pyannote/audio/tasks/segmentation/mixins.py
@@ -421,12 +421,16 @@ def train__iter__helper(self, rng: random.Random, **filters):
# indices of training files that matches domain filters
training = self.metadata["subset"] == Subsets.index("train")
for key, value in filters.items():
- training &= self.metadata[key] == self.metadata_unique_values[key].index(value)
+ training &= self.metadata[key] == self.metadata_unique_values[key].index(
+ value
+ )
file_ids = np.where(training)[0]
# turn annotated duration into a probability distribution
annotated_duration = self.annotated_duration[file_ids]
- prob_annotated_duration = annotated_duration / np.sum(annotated_duration)
+ cum_prob_annotated_duration = np.cumsum(
+ annotated_duration / np.sum(annotated_duration)
+ )
duration = self.duration
@@ -434,7 +438,7 @@ def train__iter__helper(self, rng: random.Random, **filters):
while True:
# select one file at random (with probability proportional to its annotated duration)
- file_id = np.random.choice(file_ids, p=prob_annotated_duration)
+ file_id = file_ids[cum_prob_annotated_duration.searchsorted(rng.random())]
# generate `num_chunks_per_file` chunks from this file
for _ in range(num_chunks_per_file):
@@ -444,14 +448,17 @@ def train__iter__helper(self, rng: random.Random, **filters):
)[0]
# turn annotated regions duration into a probability distribution
- prob_annotated_regions_duration = self.annotated_regions["duration"][
- annotated_region_indices
- ] / np.sum(self.annotated_regions["duration"][annotated_region_indices])
+ cum_prob_annotated_regions_duration = np.cumsum(
+ self.annotated_regions["duration"][annotated_region_indices]
+ / np.sum(
+ self.annotated_regions["duration"][annotated_region_indices]
+ )
+ )
# selected one annotated region at random (with probability proportional to its duration)
- annotated_region_index = np.random.choice(
- annotated_region_indices, p=prob_annotated_regions_duration
- )
+ annotated_region_index = annotated_region_indices[
+ cum_prob_annotated_regions_duration.searchsorted(rng.random())
+ ]
# select one chunk at random in this annotated region
_, _, start, end = self.annotated_regions[annotated_region_index]
@@ -475,7 +482,7 @@ def train__iter__(self):
"""
# create worker-specific random number generator
- rng = create_rng_for_worker(self.model.current_epoch)
+ rng = create_rng_for_worker(self.model)
balance = getattr(self, "balance", None)
if balance is None:
diff --git a/pyannote/audio/utils/random.py b/pyannote/audio/utils/random.py
index 97d50c362..4980d3520 100644
--- a/pyannote/audio/utils/random.py
+++ b/pyannote/audio/utils/random.py
@@ -27,7 +27,7 @@
import torch
-def create_rng_for_worker(epoch: int) -> Random:
+def create_rng_for_worker(model) -> Random:
"""Create worker-specific random number generator
This makes sure that
@@ -43,19 +43,24 @@ def create_rng_for_worker(epoch: int) -> Random:
# create random number generator
rng = Random()
- # create seed as a combination of PL_GLOBAL_SEED (set by pl.seed_everything())
- # and other PL multi-processing variables
- global_seed = int(os.environ.get("PL_GLOBAL_SEED", "0"))
- local_rank = int(os.environ.get("LOCAL_RANK", "0"))
- node_rank = int(os.environ.get("NODE_RANK", "0"))
-
+ global_seed = os.environ.get("PL_GLOBAL_SEED", "unset")
worker_info = torch.utils.data.get_worker_info()
if worker_info is None:
- worker_id = 0
+ worker_id = None
else:
worker_id = worker_info.id
- rng.seed(hash((global_seed, worker_id, local_rank, node_rank, epoch)))
+ seed = hash(
+ (
+ global_seed,
+ worker_id,
+ model.local_rank,
+ model.global_rank,
+ model.current_epoch,
+ )
+ )
+
+ rng.seed(seed)
return rng
diff --git a/tutorials/add_your_own_task.ipynb b/tutorials/add_your_own_task.ipynb
index 251846957..6e1575dc8 100644
--- a/tutorials/add_your_own_task.ipynb
+++ b/tutorials/add_your_own_task.ipynb
@@ -236,7 +236,7 @@
"\n",
" # create worker-specific random number generator (RNG) to avoid this common bug:\n",
" # tanelp.github.io/posts/a-bug-that-plagues-thousands-of-open-source-ml-projects/\n",
- " rng = create_rng_for_worker(self.model.current_epoch)\n",
+ " rng = create_rng_for_worker(self.model)\n",
"\n",
" # load list and number of classes\n",
" classes = self.specifications.classes\n",
From d41ce0a73d8c21208fddccbfd51804b9b78de2f3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Herv=C3=A9=20BREDIN?=
Date: Thu, 11 Jan 2024 13:04:18 +0100
Subject: [PATCH 38/57] doc: fix typo in README
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 696fb219d..2c6a889f1 100644
--- a/README.md
+++ b/README.md
@@ -70,7 +70,7 @@ for turn, _, speaker in diarization.itertracks(yield_label=True):
- Videos
- [Introduction to speaker diarization](https://umotion.univ-lemans.fr/video/9513-speech-segmentation-and-speaker-diarization/) / JSALT 2023 summer school / 90 min
- [Speaker segmentation model](https://www.youtube.com/watch?v=wDH2rvkjymY) / Interspeech 2021 / 3 min
- - [First releaase of pyannote.audio](https://www.youtube.com/watch?v=37R_R82lfwA) / ICASSP 2020 / 8 min
+ - [First release of pyannote.audio](https://www.youtube.com/watch?v=37R_R82lfwA) / ICASSP 2020 / 8 min
## Benchmark
From 9e4ec5f6e0b0f5d60c557981fc680e8cebf78f5b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Cl=C3=A9ment=20Pag=C3=A9s?=
<55240756+clement-pages@users.noreply.github.com>
Date: Fri, 12 Jan 2024 10:46:46 +0100
Subject: [PATCH 39/57] feat(task): add option to cache training metadata to
disk
Co-authored-by: Herve Bredin
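A hypothetical usage sketch of the new `cache` option (the protocol name and cache path
are placeholders):

    from pyannote.database import registry
    from pyannote.audio.tasks import SpeakerDiarization

    # assumes "MyDatabase..." was registered with pyannote.database beforehand
    protocol = registry.get_protocol("MyDatabase.SpeakerDiarization.MyProtocol")

    # first run: prepare_data() scans the protocol and writes metadata to the cache;
    # subsequent runs: prepare_data() is skipped and metadata is loaded from disk
    task = SpeakerDiarization(protocol, duration=2.0, cache="training_metadata.cache")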
---
.github/ISSUE_TEMPLATE/bug_report.yml | 6 +-
.github/ISSUE_TEMPLATE/config.yml | 5 +-
CHANGELOG.md | 5 +
pyannote/audio/core/model.py | 23 +-
pyannote/audio/core/task.py | 423 ++++++++++++++++--
pyannote/audio/tasks/embedding/mixins.py | 16 +-
pyannote/audio/tasks/segmentation/mixins.py | 400 +++--------------
.../audio/tasks/segmentation/multilabel.py | 141 +++++-
.../overlapped_speech_detection.py | 23 +-
.../tasks/segmentation/speaker_diarization.py | 47 +-
.../segmentation/voice_activity_detection.py | 23 +-
tests/tasks/test_reproducibility.py | 48 +-
tests/test_train.py | 172 ++++++-
tutorials/MRE_template.ipynb | 2 +-
tutorials/adapting_pretrained_pipeline.ipynb | 1 +
tutorials/add_your_own_task.ipynb | 110 +++--
16 files changed, 949 insertions(+), 496 deletions(-)
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index f75e050bf..6f024c73b 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -10,7 +10,7 @@ body:
- The golden rule is to **always open *one* issue for *one* bug**. If you notice several bugs and want to report them, make sure to create one new issue for each of them.
- Search [open](https://github.com/pyannote/pyannote-audio/issues) and [closed](https://github.com/pyannote/pyannote-audio/issues?q=is%3Aissue+is%3Aclosed) issues to ensure it has not already been reported. If you don't find a relevant match or if you're unsure, don't hesitate to **open a new issue**. The bugsquad will handle it from there if it's a duplicate.
- Please always check if your issue is reproducible in the latest version – it may already have been fixed!
- - If you use a custom build, please test if your issue is reproducible in official releases too.
+ - If you use a custom build, please test if your issue is reproducible in official releases too.
- type: textarea
attributes:
@@ -18,7 +18,7 @@ body:
description: |
       To properly fix a bug, we need to identify if the bug was recently introduced in the library, or if it was always present.
- Please specify the pyannote.audio version you found the issue in, including the **Git commit hash** if using a development build.
- - If you can, **please test earlier pyannote.audio versions** and, if applicable, newer versions (development branch). Mention whether the bug is reproducible or not in the versions you tested.
+ - If you can, **please test earlier pyannote.audio versions** and, if applicable, newer versions (development branch). Mention whether the bug is reproducible or not in the versions you tested.
- The aim is for us to identify whether a bug is a **regression**, i.e. an issue that didn't exist in a previous version, but was introduced later on, breaking existing functionality. For example, if a bug is reproducible in 3.2 but not in 3.0, we would like you to test intermediate 3.1 to find which version is the first one where the issue can be reproduced.
placeholder: |
- Reproducible in: 3.1, 3.2, and later
@@ -33,7 +33,7 @@ body:
- Specify the OS version, and when relevant hardware information.
- For issues that are likely OS-specific and/or GPU-related, please specify the GPU model and architecture.
- **Bug reports not including the required information may be closed at the maintainers' discretion.** If in doubt, always include all the requested information; it's better to include too much information than not enough information.
- placeholder: macOS 13.6 - pyannote.audio 3.1.1 - M1 Pro
+ placeholder: macOS 13.6 - pyannote.audio 3.1.1 - M1 Pro
validations:
required: true
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index 70e131dc2..6d6ee3543 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -2,7 +2,7 @@ blank_issues_enabled: false
contact_links:
- - name: Feature request
+ - name: Feature request
url: https://github.com/pyannote/pyannote-audio/discussions
about: Suggest an idea for this project.
@@ -12,5 +12,4 @@ contact_links:
- name: Premium models
url: https://forms.gle/eKhn7H2zTa68sMMx8
- about: We are considering selling premium models, extensions, or services around pyannote.audio.
-
+ about: We are considering selling premium models, extensions, or services around pyannote.audio.
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 75f4fab62..1cff92eb4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,7 @@
### New features
+- feat(task): add option to cache task training metadata to speed up training
- feat(pipeline): add `Waveform` and `SampleRate` preprocessors
- feat(model): add `num_frames` and `receptive_field` to segmentation models
@@ -11,6 +12,10 @@
- fix(task): fix random generators
+### Breaking changes
+
+- BREAKING(task): custom tasks need to be updated (see "Add your own task" tutorial)
+
## Version 3.1.1 (2023-12-01)
### TL;DR
diff --git a/pyannote/audio/core/model.py b/pyannote/audio/core/model.py
index bedb7f6c4..098c15c02 100644
--- a/pyannote/audio/core/model.py
+++ b/pyannote/audio/core/model.py
@@ -142,8 +142,8 @@ def specifications(self) -> Union[Specifications, Tuple[Specifications]]:
except AttributeError as e:
raise UnknownSpecificationsError(
"Task specifications are not available. This is most likely because they depend on "
- "the content of the training subset. Use `model.task.setup()` to go over the training "
- "subset and fix this, or let lightning trainer do that for you in `trainer.fit(model)`."
+ "the content of the training subset. Use `model.prepare_data()` and `model.setup()` "
+ "to go over the training subset and fix this, or let lightning trainer do that for you in `trainer.fit(model)`."
) from e
return specifications
@@ -217,9 +217,19 @@ def __example_output(
self.specifications, __example_output, example_output
)
+ def prepare_data(self):
+ self.task.prepare_data()
+
def setup(self, stage=None):
if stage == "fit":
- self.task.setup_metadata()
+ # let the task know about the trainer (e.g for broadcasting
+ # cache path between multi-GPU training processes).
+ self.task.trainer = self.trainer
+
+ # setup the task if defined (only on training and validation stages,
+ # but not for basic inference)
+ if self.task:
+ self.task.setup(stage)
# list of layers before adding task-dependent layers
before = set((name, id(module)) for name, module in self.named_modules())
@@ -252,7 +262,7 @@ def setup(self, stage=None):
module.to(self.device)
# add (trainable) loss function (e.g. ArcFace has its own set of trainable weights)
- if stage == "fit":
+ if self.task:
# let task know about the model
self.task.model = self
# setup custom loss function
@@ -468,9 +478,8 @@ def __by_name(
if isinstance(modules, str):
modules = [modules]
- for name, module in ModelSummary(self, max_depth=-1).named_modules:
- if name not in modules:
- continue
+ for name in modules:
+ module = getattr(self, name)
for parameter in module.parameters(recurse=True):
parameter.requires_grad = requires_grad
diff --git a/pyannote/audio/core/task.py b/pyannote/audio/core/task.py
index 1edfbc35c..5c0b16f29 100644
--- a/pyannote/audio/core/task.py
+++ b/pyannote/audio/core/task.py
@@ -23,19 +23,25 @@
from __future__ import annotations
+import itertools
import multiprocessing
import sys
import warnings
+from collections import defaultdict
from dataclasses import dataclass
from enum import Enum
from functools import cached_property, partial
from numbers import Number
+from pathlib import Path
+from tempfile import mkstemp
from typing import Dict, List, Literal, Optional, Sequence, Text, Tuple, Union
+import numpy as np
import pytorch_lightning as pl
import scipy.special
import torch
from pyannote.database import Protocol
+from pyannote.database.protocol.protocol import Scope, Subset
from torch.utils.data import DataLoader, Dataset, IterableDataset
from torch_audiomentations import Identity
from torch_audiomentations.core.transforms_interface import BaseWaveformTransform
@@ -44,6 +50,9 @@
from pyannote.audio.utils.loss import binary_cross_entropy, nll_loss
from pyannote.audio.utils.protocol import check_protocol
+Subsets = list(Subset.__args__)
+Scopes = list(Scope.__args__)
+
# Type of machine learning problem
class Problem(Enum):
@@ -151,6 +160,41 @@ def __len__(self):
return self.task.val__len__()
+def get_dtype(value: int, unsigned: bool = False) -> str:
+    """Return the most compact numpy dtype able to store `value` in memory.
+
+    Parameters
+    ----------
+    value: int
+        integer value that the returned dtype must be able to store
+    unsigned: bool, optional
+        if True, restrict the search to unsigned integer types. Defaults to False.
+
+ Returns
+ -------
+ str:
+ numpy formatted type
+ (see https://numpy.org/doc/stable/reference/arrays.dtypes.html)
+ """
+ if unsigned:
+ if value < 0:
+ raise ValueError(
+ f"negative value ({value}) is incompatible with unsigned types"
+ )
+ # unsigned byte (8 bits), unsigned short (16 bits), unsigned int (32 bits)
+        types_list = [(256, "B"), (65_536, "u2"), (4_294_967_296, "u4")]
+ else:
+        # signed byte (8 bits), signed short (16 bits), signed int (32 bits):
+        types_list = [(128, "b"), (32_768, "i2"), (2_147_483_648, "i")]
+ filtered_list = [
+ (max_val, type) for max_val, type in types_list if max_val > abs(value)
+ ]
+ if not filtered_list:
+ return "u8" if unsigned else "i8" # unsigned or signed long (64 bits)
+ return filtered_list[0][1]
+
+
class Task(pl.LightningDataModule):
"""Base task class
@@ -169,6 +213,13 @@ class Task(pl.LightningDataModule):
----------
protocol : Protocol
pyannote.database protocol
+ cache : str, optional
+ As (meta-)data preparation might take a very long time for large datasets,
+ it can be cached to disk for later (and faster!) re-use.
+ When `cache` does not exist, `Task.prepare_data()` generates training
+        and validation metadata from `protocol` and saves them to disk.
+        When `cache` exists, `Task.prepare_data()` is skipped and (meta-)data
+ are loaded from disk. Defaults to a temporary path.
duration : float, optional
Chunks duration in seconds. Defaults to two seconds (2.).
min_duration : float, optional
@@ -201,11 +252,13 @@ class Task(pl.LightningDataModule):
----------
specifications : Specifications or tuple of Specifications
Task specifications (available after `Task.setup` has been called.)
+
"""
def __init__(
self,
protocol: Protocol,
+        cache: Optional[str] = None,
duration: float = 2.0,
min_duration: float = None,
warm_up: Union[float, Tuple[float, float]] = 0.0,
@@ -221,8 +274,16 @@ def __init__(
self.protocol, checks = check_protocol(protocol)
self.has_validation = checks["has_validation"]
self.has_scope = checks["has_scope"]
+ if not self.has_scope:
+ raise ValueError(
+ "Protocol must provide 'scope' information (e.g. 'file', 'database', or 'global')."
+ )
+
self.has_classes = checks["has_classes"]
+ # metadata cache
+        self.cache = Path(cache) if cache else None
+
# batching
self.duration = duration
self.min_duration = duration if min_duration is None else min_duration
@@ -255,24 +316,347 @@ def __init__(
self._metric = metric
def prepare_data(self):
- """Use this to download and prepare data
-
- This is where we might end up downloading datasets
- and transform them so that they are ready to be used
- with pyannote.database. but for now, the API assume
- that we directly provide a pyannote.database.Protocol.
+ """Use this to prepare data from task protocol
Notes
-----
- Called only once.
+        Called only once, on the main process (i.e. global_rank 0).
+
+ After this method is called, the task should have a `prepared_data` attribute
+ with the following dictionary structure:
+
+ prepared_data = {
+ 'protocol': name of the protocol
+ 'audio-path': array of N paths to audio
+ 'audio-metadata': array of N audio infos such as audio subset, scope and database
+            'audio-info': array of N audio torchaudio.info structs
+ 'audio-encoding': array of N audio encodings
+            'audio-annotated': array of N annotated durations (usually equal to the file duration, but shorter if the file is only partially annotated)
+ 'annotations-regions': array of M annotated regions
+ 'annotations-segments': array of M' annotated segments
+ 'metadata-values': dict of lists of values for subset, scope and database
+            'metadata-`database-name`-labels': array of `database-name` labels. Each database with "database" scope labels has its own array.
+ 'metadata-labels': array of global scope labels
+ }
+
+ """
+
+ if self.cache:
+ # check if cache exists and is not empty:
+ if self.cache.exists() and self.cache.stat().st_size > 0:
+ # data was already created, nothing to do
+ return
+ # create parent directory if needed
+ self.cache.parent.mkdir(parents=True, exist_ok=True)
+ else:
+ # if no cache was provided by user, create a temporary file
+ # in system directory used for temp files
+ self.cache = Path(mkstemp()[1])
+
+ # list of possible values for each metadata key
+ # (will become .prepared_data[""])
+ metadata_unique_values = defaultdict(list)
+ metadata_unique_values["subset"] = Subsets
+ metadata_unique_values["scope"] = Scopes
+
+ audios = list() # list of path to audio files
+ audio_infos = list()
+ audio_encodings = list()
+ metadata = list() # list of metadata
+
+ annotated_duration = list() # total duration of annotated regions (per file)
+ annotated_regions = list() # annotated regions
+ annotations = list() # actual annotations
+ unique_labels = list()
+ database_unique_labels = {}
+
+ if self.has_validation:
+ files_iter = itertools.chain(
+ self.protocol.train(), self.protocol.development()
+ )
+ else:
+ files_iter = self.protocol.train()
+
+ for file_id, file in enumerate(files_iter):
+ # gather metadata and update metadata_unique_values so that each metadatum
+ # (e.g. source database or label) is represented by an integer.
+ metadatum = dict()
+
+ # keep track of source database and subset (train, development, or test)
+ if file["database"] not in metadata_unique_values["database"]:
+ metadata_unique_values["database"].append(file["database"])
+ metadatum["database"] = metadata_unique_values["database"].index(
+ file["database"]
+ )
+ metadatum["subset"] = Subsets.index(file["subset"])
+
+ # keep track of label scope (file, database, or global)
+ metadatum["scope"] = Scopes.index(file["scope"])
+
+ remaining_metadata_keys = set(file) - set(
+ [
+ "uri",
+ "database",
+ "subset",
+ "audio",
+ "torchaudio.info",
+ "scope",
+ "classes",
+ "annotation",
+ "annotated",
+ ]
+ )
+
+ # keep track of any other (integer or string) metadata provided by the protocol
+ # (e.g. a "domain" key for domain-adversarial training)
+ for key in remaining_metadata_keys:
+ value = file[key]
+
+ if isinstance(value, str):
+ if value not in metadata_unique_values[key]:
+ metadata_unique_values[key].append(value)
+ metadatum[key] = metadata_unique_values[key].index(value)
+
+ elif isinstance(value, int):
+ metadatum[key] = value
+
+ else:
+ warnings.warn(
+ f"Ignoring '{key}' metadata because of its type ({type(value)}). Only str and int are supported for now.",
+ category=UserWarning,
+ )
+
+ metadata.append(metadatum)
+
+ # reset list of file-scoped labels
+ file_unique_labels = list()
+
+ # path to audio file
+ audios.append(str(file["audio"]))
+
+ # audio info
+ audio_info = file["torchaudio.info"]
+ audio_infos.append(
+ (
+ audio_info.sample_rate, # sample rate
+ audio_info.num_frames, # number of frames
+ audio_info.num_channels, # number of channels
+ audio_info.bits_per_sample, # bits per sample
+ )
+ )
+ audio_encodings.append(audio_info.encoding) # encoding
+
+ # annotated regions and duration
+ _annotated_duration = 0.0
+ for segment in file["annotated"]:
+ # skip annotated regions that are shorter than training chunk duration
+ if segment.duration < self.duration:
+ continue
+
+ # append annotated region
+ annotated_region = (
+ file_id,
+ segment.duration,
+ segment.start,
+ )
+ annotated_regions.append(annotated_region)
+
+ # increment annotated duration
+ _annotated_duration += segment.duration
+
+ # append annotated duration
+ annotated_duration.append(_annotated_duration)
+
+ # annotations
+ for segment, _, label in file["annotation"].itertracks(yield_label=True):
+ # "scope" is provided by speaker diarization protocols to indicate
+ # whether speaker labels are local to the file ('file'), consistent across
+ # all files in a database ('database'), or globally consistent ('global')
+
+ # 0 = 'file' / 1 = 'database' / 2 = 'global'
+ scope = Scopes.index(file["scope"])
+
+ # update list of file-scope labels
+ if label not in file_unique_labels:
+ file_unique_labels.append(label)
+ # and convert label to its (file-scope) index
+ file_label_idx = file_unique_labels.index(label)
+
+ database_label_idx = global_label_idx = -1
+
+ if scope > 0: # 'database' or 'global'
+ # update list of database-scope labels
+ database = file["database"]
+ if database not in database_unique_labels:
+ database_unique_labels[database] = []
+ if label not in database_unique_labels[database]:
+ database_unique_labels[database].append(label)
+
+ # and convert label to its (database-scope) index
+ database_label_idx = database_unique_labels[database].index(label)
+
+ if scope > 1: # 'global'
+ # update list of global-scope labels
+ if label not in unique_labels:
+ unique_labels.append(label)
+ # and convert label to its (global-scope) index
+ global_label_idx = unique_labels.index(label)
+
+ annotations.append(
+ (
+ file_id, # index of file
+ segment.start, # start time
+ segment.end, # end time
+ file_label_idx, # file-scope label index
+ database_label_idx, # database-scope label index
+ global_label_idx, # global-scope index
+ )
+ )
+
+ # since not all metadata keys are present in all files, fall back to -1 when a key is missing
+ metadata = [
+ tuple(metadatum.get(key, -1) for key in metadata_unique_values)
+ for metadatum in metadata
+ ]
+ metadata_dtype = [
+ (key, get_dtype(max(m[i] for m in metadata)))
+ for i, key in enumerate(metadata_unique_values)
+ ]
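The `metadata_dtype` built above yields a numpy structured array with one field per metadata key, each field sized by `get_dtype` to the largest index it must store (signed, because -1 marks a missing key). A toy illustration of the resulting layout:

    import numpy as np

    # two files, metadata keys 'database' and 'subset'; -1 marks a missing key
    metadata = [(0, 0), (1, -1)]
    metadata_dtype = [("database", "i1"), ("subset", "i1")]

    audio_metadata = np.array(metadata, dtype=metadata_dtype)
    print(audio_metadata["database"])                     # [0 1]
    print(audio_metadata[audio_metadata["subset"] >= 0])  # files with a known subset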
+
+ # turn list of files metadata into a single numpy array
+ # TODO: improve using https://github.com/pytorch/pytorch/issues/13246#issuecomment-617140519
+ info_dtype = [
+ (
+ "sample_rate",
+ get_dtype(max(ai[0] for ai in audio_infos), unsigned=True),
+ ),
+ (
+ "num_frames",
+ get_dtype(max(ai[1] for ai in audio_infos), unsigned=True),
+ ),
+ ("num_channels", "B"),
+ ("bits_per_sample", "B"),
+ ]
+
+ # turn list of annotated regions into a single numpy array
+ region_dtype = [
+ (
+ "file_id",
+ get_dtype(max(ar[0] for ar in annotated_regions), unsigned=True),
+ ),
+ ("duration", "f"),
+ ("start", "f"),
+ ]
+
+ # turn list of annotations into a single numpy array
+ segment_dtype = [
+ (
+ "file_id",
+ get_dtype(max(a[0] for a in annotations), unsigned=True),
+ ),
+ ("start", "f"),
+ ("end", "f"),
+ ("file_label_idx", get_dtype(max(a[3] for a in annotations))),
+ ("database_label_idx", get_dtype(max(a[4] for a in annotations))),
+ ("global_label_idx", get_dtype(max(a[5] for a in annotations))),
+ ]
+
+ # save all protocol data in a dict
+ prepared_data = {}
+
+ # keep track of protocol name
+ prepared_data["protocol"] = self.protocol.name
+
+ prepared_data["audio-path"] = np.array(audios, dtype=np.string_)
+ audios.clear()
+
+ prepared_data["audio-metadata"] = np.array(metadata, dtype=metadata_dtype)
+ metadata.clear()
+
+ prepared_data["audio-info"] = np.array(audio_infos, dtype=info_dtype)
+ audio_infos.clear()
+
+ prepared_data["audio-encoding"] = np.array(audio_encodings, dtype=np.string_)
+ audio_encodings.clear()
+
+ prepared_data["audio-annotated"] = np.array(annotated_duration)
+ annotated_duration.clear()
+
+ prepared_data["annotations-regions"] = np.array(
+ annotated_regions, dtype=region_dtype
+ )
+ annotated_regions.clear()
+
+ prepared_data["annotations-segments"] = np.array(
+ annotations, dtype=segment_dtype
+ )
+ annotations.clear()
+
+ prepared_data["metadata-values"] = metadata_unique_values
+
+ for database, labels in database_unique_labels.items():
+ prepared_data[f"metadata-{database}-labels"] = np.array(
+ labels, dtype=np.string_
+ )
+ database_unique_labels.clear()
+
+ prepared_data["metadata-labels"] = np.array(unique_labels, dtype=np.string_)
+ unique_labels.clear()
+
+ self.prepare_validation(prepared_data)
+ self.post_prepare_data(prepared_data)
+
+ # save prepared data on the disk
+ with open(self.cache, "wb") as cache_file:
+ np.savez_compressed(cache_file, **prepared_data)
+
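Because the cache is written with `np.savez_compressed`, it is a regular `.npz` archive that can be inspected offline, mirroring what `setup()` does below. A sketch, with an illustrative cache path standing in for whatever was passed as the task's `cache` parameter:

    import numpy as np

    with open("/tmp/prepared_data.cache", "rb") as cache_file:  # illustrative path
        prepared_data = dict(np.load(cache_file, allow_pickle=True))

    print(prepared_data["protocol"])          # cached protocol name
    print(prepared_data["audio-path"].shape)  # one entry per file
    print(prepared_data["annotations-segments"].dtype.names)
    # ('file_id', 'start', 'end', 'file_label_idx', 'database_label_idx', 'global_label_idx')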
+ def post_prepare_data(self, prepared_data: Dict):
+ """Method for completing `prepared_data` with task-specific data.
+ For instance, for a classification task, this could be a list of
+ possible classes.
+
+ Parameters
+ ----------
+ prepared_data: dict
+ dictionary containing protocol data prepared by
+ `prepare_data()`
+ Note
+ ----
+ This method does not return anything. Thus, users have to modify
+ `prepared_data` in place for updates to be taken into account.
"""
pass
+ def setup(self, stage=None):
+ """Setup data cached by prepare_data into the task on each device"""
+
+ # send cache path on all processes used for the training,
+ # allowing them to access the cache generated by prepare_data
+ if stage == "fit":
+ self.cache = self.trainer.strategy.broadcast(self.cache)
+
+ try:
+ with open(self.cache, "rb") as cache_file:
+ self.prepared_data = dict(np.load(cache_file, allow_pickle=True))
+ except FileNotFoundError:
+ print(
+ "Cached data for protocol not found. Ensure that prepare_data() was called",
+ " and executed correctly or/and that the path to the task cache is correct.",
+ )
+ raise
+
+ # check that the task's current protocol matches the cached one
+ if self.protocol.name != self.prepared_data["protocol"]:
+ raise ValueError(
+ f"Protocol specified for the task ({self.protocol.name}) "
+ f"does not correspond to the cached one ({self.prepared_data['protocol']})"
+ )
+
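With this split, `prepare_data()` runs once on the main process and writes the cache, while `setup()` runs on every device and loads it back. Outside a Lightning `Trainer`, the two calls can be made by hand, as the updated tests and tutorials do; a sketch using the Debug protocol from the test suite (cache path illustrative):

    from pyannote.database import FileFinder, get_protocol
    from pyannote.audio.tasks import VoiceActivityDetection

    protocol = get_protocol(
        "Debug.SpeakerDiarization.Debug", preprocessors={"audio": FileFinder()}
    )
    task = VoiceActivityDetection(protocol, cache="/tmp/prepared_data.cache")
    task.prepare_data()  # builds (meta-)data and saves them to the cache
    task.setup()         # loads the cache into task.prepared_data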
@property
def specifications(self) -> Union[Specifications, Tuple[Specifications]]:
# setup metadata on-demand the first time specifications are requested and missing
if not hasattr(self, "_specifications"):
- self.setup_metadata()
+ self.setup()
return self._specifications
@specifications.setter
@@ -281,29 +665,6 @@ def specifications(
):
self._specifications = specifications
- @property
- def has_setup_metadata(self):
- return getattr(self, "_has_setup_metadata", False)
-
- @has_setup_metadata.setter
- def has_setup_metadata(self, value: bool):
- self._has_setup_metadata = value
-
- def setup_metadata(self):
- """Called at the beginning of training at the very beginning of Model.setup(stage="fit")
-
- Notes
- -----
- This hook is called on every process when using DDP.
-
- If `specifications` attribute has not been set in `__init__`,
- `setup` is your last chance to set it.
- """
-
- if not self.has_setup_metadata:
- self.setup()
- self.has_setup_metadata = True
-
def setup_loss_func(self):
pass
diff --git a/pyannote/audio/tasks/embedding/mixins.py b/pyannote/audio/tasks/embedding/mixins.py
index 03875256e..9b404f9cf 100644
--- a/pyannote/audio/tasks/embedding/mixins.py
+++ b/pyannote/audio/tasks/embedding/mixins.py
@@ -75,7 +75,7 @@ def batch_size(self) -> int:
def batch_size(self, batch_size: int):
self.batch_size_ = batch_size
- def setup(self):
+ def setup(self, stage=None):
# loop over the training set, remove annotated regions shorter than
# chunk duration, and keep track of the reference annotations, per class.
@@ -119,12 +119,6 @@ def setup(self):
classes=sorted(self._train),
)
- if not self.has_validation:
- return
-
- if isinstance(self.protocol, SpeakerVerificationProtocol):
- self._validation = list(self.protocol.development_trial())
-
def default_metric(
self,
) -> Union[Metric, Sequence[Metric], Dict[str, Metric]]:
@@ -250,9 +244,13 @@ def training_step(self, batch, batch_idx: int):
return {"loss": loss}
+ def prepare_validation(self, prepared_dict: Dict):
+ if isinstance(self.protocol, SpeakerVerificationProtocol):
+ prepared_dict["validation"] = list(self.protocol.development_trial())
+
def val__getitem__(self, idx):
if isinstance(self.protocol, SpeakerVerificationProtocol):
- trial = self._validation[idx]
+ trial = self.prepared_data["validation"][idx]
data = dict()
for idx in [1, 2]:
@@ -281,7 +279,7 @@ def val__getitem__(self, idx):
def val__len__(self):
if isinstance(self.protocol, SpeakerVerificationProtocol):
- return len(self._validation)
+ return len(self.prepared_data["validation"])
elif isinstance(self.protocol, SpeakerDiarizationProtocol):
return 0
diff --git a/pyannote/audio/tasks/segmentation/mixins.py b/pyannote/audio/tasks/segmentation/mixins.py
index 94c6da506..1af863d89 100644
--- a/pyannote/audio/tasks/segmentation/mixins.py
+++ b/pyannote/audio/tasks/segmentation/mixins.py
@@ -23,14 +23,11 @@
import itertools
import math
import random
-import warnings
-from collections import defaultdict
from typing import Dict, Sequence, Union
import matplotlib.pyplot as plt
import numpy as np
import torch
-from pyannote.database.protocol import SegmentationProtocol, SpeakerDiarizationProtocol
from pyannote.database.protocol.protocol import Scope, Subset
from pytorch_lightning.loggers import MLFlowLogger, TensorBoardLogger
from torch.utils.data._utils.collate import default_collate
@@ -38,23 +35,23 @@
from torchmetrics import Metric
from torchmetrics.classification import BinaryAUROC, MulticlassAUROC, MultilabelAUROC
-from pyannote.audio.core.task import Problem
+from pyannote.audio.core.task import Problem, Task, get_dtype
from pyannote.audio.utils.random import create_rng_for_worker
Subsets = list(Subset.__args__)
Scopes = list(Scope.__args__)
-class SegmentationTaskMixin:
+class SegmentationTask(Task):
"""Methods common to most segmentation tasks"""
def get_file(self, file_id):
file = dict()
- file["audio"] = str(self.audios[file_id], encoding="utf-8")
+ file["audio"] = str(self.prepared_data["audio-path"][file_id], encoding="utf-8")
- _audio_info = self.audio_infos[file_id]
- _encoding = self.audio_encodings[file_id]
+ _audio_info = self.prepared_data["audio-info"][file_id]
+ _encoding = self.prepared_data["audio-encoding"][file_id]
sample_rate = _audio_info["sample_rate"]
num_frames = _audio_info["num_frames"]
@@ -71,319 +68,6 @@ def get_file(self, file_id):
return file
- def setup(self):
- """Setup"""
-
- # duration of training chunks
- # TODO: handle variable duration case
- duration = getattr(self, "duration", 0.0)
-
- # list of possible values for each metadata key
- metadata_unique_values = defaultdict(list)
-
- metadata_unique_values["subset"] = Subsets
-
- if isinstance(self.protocol, SpeakerDiarizationProtocol):
- metadata_unique_values["scope"] = Scopes
-
- elif isinstance(self.protocol, SegmentationProtocol):
- classes = getattr(self, "classes", list())
-
- # make sure classes attribute exists (and set to None if it did not exist)
- self.classes = getattr(self, "classes", None)
- if self.classes is None:
- classes = list()
- # metadata_unique_values["classes"] = list(classes)
-
- audios = list() # list of path to audio files
- audio_infos = list()
- audio_encodings = list()
- metadata = list() # list of metadata
-
- annotated_duration = list() # total duration of annotated regions (per file)
- annotated_regions = list() # annotated regions
- annotations = list() # actual annotations
- annotated_classes = list() # list of annotated classes (per file)
- unique_labels = list()
-
- if self.has_validation:
- files_iter = itertools.chain(
- self.protocol.train(), self.protocol.development()
- )
- else:
- files_iter = self.protocol.train()
-
- for file_id, file in enumerate(files_iter):
- # gather metadata and update metadata_unique_values so that each metadatum
- # (e.g. source database or label) is represented by an integer.
- metadatum = dict()
-
- # keep track of source database and subset (train, development, or test)
- if file["database"] not in metadata_unique_values["database"]:
- metadata_unique_values["database"].append(file["database"])
- metadatum["database"] = metadata_unique_values["database"].index(
- file["database"]
- )
- metadatum["subset"] = Subsets.index(file["subset"])
-
- # keep track of speaker label scope (file, database, or global) for speaker diarization protocols
- if isinstance(self.protocol, SpeakerDiarizationProtocol):
- metadatum["scope"] = Scopes.index(file["scope"])
-
- # keep track of list of classes for regular segmentation protocols
- # Different files may be annotated using a different set of classes
- # (e.g. one database for speech/music/noise, and another one for male/female/child)
- if isinstance(self.protocol, SegmentationProtocol):
- if "classes" in file:
- local_classes = file["classes"]
- else:
- local_classes = file["annotation"].labels()
-
- # if task was not initialized with a fixed list of classes,
- # we build it as the union of all classes found in files
- if self.classes is None:
- for klass in local_classes:
- if klass not in classes:
- classes.append(klass)
- annotated_classes.append(
- [classes.index(klass) for klass in local_classes]
- )
-
- # if task was initialized with a fixed list of classes,
- # we make sure that all files use a subset of these classes
- # if they don't, we issue a warning and ignore the extra classes
- else:
- extra_classes = set(local_classes) - set(self.classes)
- if extra_classes:
- warnings.warn(
- f"Ignoring extra classes ({', '.join(extra_classes)}) found for file {file['uri']} ({file['database']}). "
- )
- annotated_classes.append(
- [
- self.classes.index(klass)
- for klass in set(local_classes) & set(self.classes)
- ]
- )
-
- remaining_metadata_keys = set(file) - set(
- [
- "uri",
- "database",
- "subset",
- "audio",
- "torchaudio.info",
- "scope",
- "classes",
- "annotation",
- "annotated",
- ]
- )
-
- # keep track of any other (integer or string) metadata provided by the protocol
- # (e.g. a "domain" key for domain-adversarial training)
- for key in remaining_metadata_keys:
- value = file[key]
-
- if isinstance(value, str):
- if value not in metadata_unique_values[key]:
- metadata_unique_values[key].append(value)
- metadatum[key] = metadata_unique_values[key].index(value)
-
- elif isinstance(value, int):
- metadatum[key] = value
-
- else:
- warnings.warn(
- f"Ignoring '{key}' metadata because of its type ({type(value)}). Only str and int are supported for now.",
- category=UserWarning,
- )
-
- metadata.append(metadatum)
-
- database_unique_labels = list()
-
- # reset list of file-scoped labels
- file_unique_labels = list()
-
- # path to audio file
- audios.append(str(file["audio"]))
-
- # audio info
- audio_info = file["torchaudio.info"]
- audio_infos.append(
- (
- audio_info.sample_rate, # sample rate
- audio_info.num_frames, # number of frames
- audio_info.num_channels, # number of channels
- audio_info.bits_per_sample, # bits per sample
- )
- )
- audio_encodings.append(audio_info.encoding) # encoding
-
- # annotated regions and duration
- _annotated_duration = 0.0
- for segment in file["annotated"]:
- # skip annotated regions that are shorter than training chunk duration
- if segment.duration < duration:
- continue
-
- # append annotated region
- annotated_region = (
- file_id,
- segment.duration,
- segment.start,
- segment.end,
- )
- annotated_regions.append(annotated_region)
-
- # increment annotated duration
- _annotated_duration += segment.duration
-
- # append annotated duration
- annotated_duration.append(_annotated_duration)
-
- # annotations
- for segment, _, label in file["annotation"].itertracks(yield_label=True):
- # "scope" is provided by speaker diarization protocols to indicate
- # whether speaker labels are local to the file ('file'), consistent across
- # all files in a database ('database'), or globally consistent ('global')
-
- if "scope" in file:
- # 0 = 'file'
- # 1 = 'database'
- # 2 = 'global'
- scope = Scopes.index(file["scope"])
-
- # update list of file-scope labels
- if label not in file_unique_labels:
- file_unique_labels.append(label)
- # and convert label to its (file-scope) index
- file_label_idx = file_unique_labels.index(label)
-
- database_label_idx = global_label_idx = -1
-
- if scope > 0: # 'database' or 'global'
- # update list of database-scope labels
- if label not in database_unique_labels:
- database_unique_labels.append(label)
-
- # and convert label to its (database-scope) index
- database_label_idx = database_unique_labels.index(label)
-
- if scope > 1: # 'global'
- # update list of global-scope labels
- if label not in unique_labels:
- unique_labels.append(label)
- # and convert label to its (global-scope) index
- global_label_idx = unique_labels.index(label)
-
- # basic segmentation protocols do not provide "scope" information
- # as classes are global by definition
-
- else:
- try:
- file_label_idx = (
- database_label_idx
- ) = global_label_idx = classes.index(label)
- except ValueError:
- # skip labels that are not in the list of classes
- continue
-
- annotations.append(
- (
- file_id, # index of file
- segment.start, # start time
- segment.end, # end time
- file_label_idx, # file-scope label index
- database_label_idx, # database-scope label index
- global_label_idx, # global-scope index
- )
- )
-
- # since not all metadata keys are present in all files, fallback to -1 when a key is missing
- metadata = [
- tuple(metadatum.get(key, -1) for key in metadata_unique_values)
- for metadatum in metadata
- ]
- dtype = [(key, "i") for key in metadata_unique_values]
- self.metadata = np.array(metadata, dtype=dtype)
-
- # NOTE: read with str(self.audios[file_id], encoding='utf-8')
- self.audios = np.array(audios, dtype=np.string_)
-
- # turn list of files metadata into a single numpy array
- # TODO: improve using https://github.com/pytorch/pytorch/issues/13246#issuecomment-617140519
-
- dtype = [
- ("sample_rate", "i"),
- ("num_frames", "i"),
- ("num_channels", "i"),
- ("bits_per_sample", "i"),
- ]
- self.audio_infos = np.array(audio_infos, dtype=dtype)
- self.audio_encodings = np.array(audio_encodings, dtype=np.string_)
-
- self.annotated_duration = np.array(annotated_duration)
-
- # turn list of annotated regions into a single numpy array
- dtype = [("file_id", "i"), ("duration", "f"), ("start", "f"), ("end", "f")]
- self.annotated_regions = np.array(annotated_regions, dtype=dtype)
-
- # convert annotated_classes (which is a list of list of classes, one list of classes per file)
- # into a single (num_files x num_classes) numpy array:
- # * True indicates that this particular class was annotated for this particular file (though it may not be active in this file)
- # * False indicates that this particular class was not even annotated (i.e. its absence does not imply that it is not active in this file)
- if isinstance(self.protocol, SegmentationProtocol) and self.classes is None:
- self.classes = classes
- self.annotated_classes = np.zeros(
- (len(annotated_classes), len(self.classes)), dtype=np.bool_
- )
- for file_id, classes in enumerate(annotated_classes):
- self.annotated_classes[file_id, classes] = True
-
- # turn list of annotations into a single numpy array
- dtype = [
- ("file_id", "i"),
- ("start", "f"),
- ("end", "f"),
- ("file_label_idx", "i"),
- ("database_label_idx", "i"),
- ("global_label_idx", "i"),
- ]
- self.annotations = np.array(annotations, dtype=dtype)
-
- self.metadata_unique_values = metadata_unique_values
-
- if not self.has_validation:
- return
-
- validation_chunks = list()
-
- # obtain indexes of files in the validation subset
- validation_file_ids = np.where(
- self.metadata["subset"] == Subsets.index("development")
- )[0]
-
- # iterate over files in the validation subset
- for file_id in validation_file_ids:
- # get annotated regions in file
- annotated_regions = self.annotated_regions[
- self.annotated_regions["file_id"] == file_id
- ]
-
- # iterate over annotated regions
- for annotated_region in annotated_regions:
- # number of chunks in annotated region
- num_chunks = round(annotated_region["duration"] // duration)
-
- # iterate over chunks
- for c in range(num_chunks):
- start_time = annotated_region["start"] + c * duration
- validation_chunks.append((file_id, start_time, duration))
-
- dtype = [("file_id", "i"), ("start", "f"), ("duration", "f")]
- self.validation_chunks = np.array(validation_chunks, dtype=dtype)
-
def default_metric(
self,
) -> Union[Metric, Sequence[Metric], Dict[str, Metric]]:
@@ -419,15 +103,17 @@ def train__iter__helper(self, rng: random.Random, **filters):
"""
# indices of training files that matches domain filters
- training = self.metadata["subset"] == Subsets.index("train")
+ training = self.prepared_data["audio-metadata"]["subset"] == Subsets.index(
+ "train"
+ )
for key, value in filters.items():
- training &= self.metadata[key] == self.metadata_unique_values[key].index(
- value
- )
+ training &= self.prepared_data["audio-metadata"][key] == self.prepared_data[
+ "metadata"
+ ][key].index(value)
file_ids = np.where(training)[0]
# turn annotated duration into a probability distribution
- annotated_duration = self.annotated_duration[file_ids]
+ annotated_duration = self.prepared_data["audio-annotated"][file_ids]
cum_prob_annotated_duration = np.cumsum(
annotated_duration / np.sum(annotated_duration)
)
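`cum_prob_annotated_duration` turns per-file annotated durations into a cumulative distribution so that training files are drawn with probability proportional to how much annotated audio they contain. The underlying numpy idiom, in isolation:

    import random
    import numpy as np

    annotated_duration = np.array([10.0, 30.0, 60.0])  # seconds of annotation per file
    cum_prob = np.cumsum(annotated_duration / np.sum(annotated_duration))

    rng = random.Random(42)
    # draws file 0 with ~10% probability, file 1 with ~30%, file 2 with ~60%
    file_idx = int(np.searchsorted(cum_prob, rng.random()))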
@@ -444,14 +130,19 @@ def train__iter__helper(self, rng: random.Random, **filters):
for _ in range(num_chunks_per_file):
# find indices of annotated regions in this file
annotated_region_indices = np.where(
- self.annotated_regions["file_id"] == file_id
+ self.prepared_data["annotations-regions"]["file_id"] == file_id
)[0]
# turn annotated regions duration into a probability distribution
cum_prob_annotated_regions_duration = np.cumsum(
- self.annotated_regions["duration"][annotated_region_indices]
+ self.prepared_data["annotations-regions"]["duration"][
+ annotated_region_indices
+ ]
/ np.sum(
- self.annotated_regions["duration"][annotated_region_indices]
+ self.prepared_data["annotations-regions"]["duration"][
+ annotated_region_indices
+ ]
)
)
@@ -461,8 +152,10 @@ def train__iter__helper(self, rng: random.Random, **filters):
]
# select one chunk at random in this annotated region
- _, _, start, end = self.annotated_regions[annotated_region_index]
- start_time = rng.uniform(start, end - duration)
+ _, region_duration, start = self.prepared_data["annotations-regions"][
+ annotated_region_index
+ ]
+ start_time = rng.uniform(start, start + region_duration - duration)
yield self.prepare_chunk(file_id, start_time, duration)
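Since annotated regions now store `(file_id, duration, start)` rather than an explicit end time, the uniform sampling of the chunk start is rewritten accordingly; both forms cover exactly the same interval. A quick sanity check:

    import random

    rng = random.Random(0)
    start, end, duration = 3.0, 10.0, 2.0
    region_duration = end - start  # 7.0

    # old form sampled over [start, end - duration] = [3, 8];
    # new form samples over [start, start + region_duration - duration] = [3, 8]
    assert start + region_duration - duration == end - duration
    start_time = rng.uniform(start, start + region_duration - duration)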
@@ -492,7 +185,7 @@ def train__iter__(self):
# create a subchunk generator for each combination of "balance" keys
subchunks = dict()
for product in itertools.product(
- *[self.metadata_unique_values[key] for key in balance]
+ *[self.prepared_data["metadata"][key] for key in balance]
):
# we iterate on the cartesian product of the values in metadata_unique_values
# eg: for balance=["database", "split"], with 2 databases and 2 splits:
@@ -564,11 +257,48 @@ def collate_fn(self, batch, stage="train"):
def train__len__(self):
# Number of training samples in one epoch
- duration = np.sum(self.annotated_duration)
+ duration = np.sum(self.prepared_data["audio-annotated"])
return max(self.batch_size, math.ceil(duration / self.duration))
+ def prepare_validation(self, prepared_data: Dict):
+ validation_chunks = list()
+
+ # obtain indexes of files in the validation subset
+ validation_file_ids = np.where(
+ prepared_data["audio-metadata"]["subset"] == Subsets.index("development")
+ )[0]
+
+ # iterate over files in the validation subset
+ for file_id in validation_file_ids:
+ # get annotated regions in file
+ annotated_regions = prepared_data["annotations-regions"][
+ prepared_data["annotations-regions"]["file_id"] == file_id
+ ]
+
+ # iterate over annotated regions
+ for annotated_region in annotated_regions:
+ # number of chunks in annotated region
+ num_chunks = round(annotated_region["duration"] // self.duration)
+
+ # iterate over chunks
+ for c in range(num_chunks):
+ start_time = annotated_region["start"] + c * self.duration
+ validation_chunks.append((file_id, start_time, self.duration))
+
+ dtype = [
+ (
+ "file_id",
+ get_dtype(max(v[0] for v in validation_chunks), unsigned=True),
+ ),
+ ("start", "f"),
+ ("duration", "f"),
+ ]
+
+ prepared_data["validation"] = np.array(validation_chunks, dtype=dtype)
+ validation_chunks.clear()
+
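`prepare_validation` tiles every annotated region of the development subset with non-overlapping chunks of length `duration`. For instance, a 7-second region starting at t=3, with 2-second chunks, yields three validation chunks:

    duration = 2.0
    region_start, region_duration = 3.0, 7.0

    num_chunks = round(region_duration // duration)  # 3
    chunks = [(region_start + c * duration, duration) for c in range(num_chunks)]
    # [(3.0, 2.0), (5.0, 2.0), (7.0, 2.0)]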
def val__getitem__(self, idx):
- validation_chunk = self.validation_chunks[idx]
+ validation_chunk = self.prepared_data["validation"][idx]
return self.prepare_chunk(
validation_chunk["file_id"],
validation_chunk["start"],
@@ -576,7 +306,7 @@ def val__getitem__(self, idx):
)
def val__len__(self):
- return len(self.validation_chunks)
+ return len(self.prepared_data["validation"])
def validation_step(self, batch, batch_idx: int):
"""Compute validation area under the ROC curve
diff --git a/pyannote/audio/tasks/segmentation/multilabel.py b/pyannote/audio/tasks/segmentation/multilabel.py
index c1d58431a..c641b5c3e 100644
--- a/pyannote/audio/tasks/segmentation/multilabel.py
+++ b/pyannote/audio/tasks/segmentation/multilabel.py
@@ -20,6 +20,8 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
+import itertools
+import textwrap
from typing import Dict, List, Optional, Sequence, Text, Tuple, Union
import numpy as np
@@ -31,11 +33,11 @@
from torch_audiomentations.core.transforms_interface import BaseWaveformTransform
from torchmetrics import Metric
-from pyannote.audio.core.task import Problem, Resolution, Specifications, Task
-from pyannote.audio.tasks.segmentation.mixins import SegmentationTaskMixin
+from pyannote.audio.core.task import Problem, Resolution, Specifications
+from pyannote.audio.tasks.segmentation.mixins import SegmentationTask
-class MultiLabelSegmentation(SegmentationTaskMixin, Task):
+class MultiLabelSegmentation(SegmentationTask):
"""Generic multi-label segmentation
Multi-label segmentation is the process of detecting temporal intervals
@@ -47,7 +49,13 @@ class MultiLabelSegmentation(SegmentationTaskMixin, Task):
Parameters
----------
protocol : Protocol
pyannote.database protocol
+ cache : str, optional
+ As (meta-)data preparation might take a very long time for large datasets,
+ it can be cached to disk for later (and faster!) re-use.
+ When `cache` does not exist, `Task.prepare_data()` generates training
+ and validation metadata from `protocol` and saves them to disk.
+ When `cache` exists, `Task.prepare_data()` is skipped and (meta-)data
+ are loaded from disk. Defaults to a temporary path.
classes : List[str], optional
List of classes. Defaults to the list of classes available in the training set.
duration : float, optional
@@ -84,6 +92,7 @@ class MultiLabelSegmentation(SegmentationTaskMixin, Task):
def __init__(
self,
protocol: Protocol,
+ cache: Optional[str] = None,
classes: Optional[List[str]] = None,
duration: float = 2.0,
warm_up: Union[float, Tuple[float, float]] = 0.0,
@@ -109,6 +118,7 @@ def __init__(
pin_memory=pin_memory,
augmentation=augmentation,
metric=metric,
+ cache=cache,
)
self.balance = balance
@@ -119,11 +129,114 @@ def __init__(
# classes should be detected. therefore, we postpone the definition of
# specifications to setup()
- def setup(self):
- super().setup()
+ def post_prepare_data(self, prepared_data: Dict):
+ # as different files may be annotated using a different set of classes
+ # (e.g. one database for speech/music/noise, and another one for
+ # male/female/child), we keep track of this information. this is used
+ # to know whether a missing class is considered a negative example (0) or
+ # simply an unknown example (-1)
+
+ if self.classes is None and not self.has_classes:
+ msg = textwrap.dedent(
+ """
+ Could not infer list of classes. Either provide a list of classes when
+ instantiating the task, or make sure that the training protocol provides
+ a 'classes' entry. See https://github.com/pyannote/pyannote-database#segmentation
+ for more details.
+ """
+ )
+ raise ValueError(msg)
+
+ if self.has_validation:
+ files_iter = itertools.chain(
+ self.protocol.train(), self.protocol.development()
+ )
+ else:
+ files_iter = self.protocol.train()
+
+ if self.classes is None:
+ classes = list() # overall list of classes
+ annotated_classes = list() # list of annotated classes (per file)
+
+ for file in files_iter:
+ file_classes = file.get("classes", None)
+
+ if not file_classes:
+ msg = textwrap.dedent(
+ f"""
+ File "{file['uri']}" (from {file['database']} database) does not
+ provide a 'classes' entry. Please make sure the corresponding
+ training protocol provides a 'classes' entry for all files. See
+ https://github.com/pyannote/pyannote-database#segmentation for more
+ details.
+ """
+ )
+ raise ValueError(msg)
+
+ for klass in file_classes:
+ if klass not in classes:
+ classes.append(klass)
+ annotated_classes.append(
+ [classes.index(klass) for klass in file_classes]
+ )
+
+ prepared_data["classes-list"] = np.array(classes, dtype=np.string_)
+ self.classes = classes
+
+ else:
+ annotated_classes = list() # list of annotated classes (per file)
+ for file in files_iter:
+ file_classes = file.get("classes", None)
+
+ if not file_classes:
+ msg = textwrap.dedent(
+ f"""
+ File "{file['uri']}" (from {file['database']} database) does not
+ provide a 'classes' entry. Please make sure the corresponding
+ training protocol provides a 'classes' entry for all files. See
+ https://github.com/pyannote/pyannote-database#segmentation for more
+ details.
+ """
+ )
+ raise ValueError(msg)
+
+ extra_classes = set(file_classes) - set(self.classes)
+ if extra_classes:
+ msg = textwrap.dedent(
+ f"""
+ File "{file['uri']}" (from {file['database']} database) provides
+ extra classes ({', '.join(extra_classes)}) that are ignored.
+ """
+ )
+ print(msg)
+
+ annotated_classes.append(
+ [
+ self.classes.index(klass)
+ for klass in set(file_classes) & set(self.classes)
+ ]
+ )
+
+ prepared_data["classes-list"] = np.array(self.classes, dtype=np.string_)
+
+ # convert annotated_classes (which is a list of list of classes, one list of classes per file)
+ # into a single (num_files x num_classes) numpy array:
+ # * True indicates that this particular class was annotated for this particular file
+ # (though it may not be active in this file)
+ # * False indicates that this particular class was not even annotated (i.e. its absence
+ # does not imply that it is not active in this file)
+ annotated_classes_array = np.zeros(
+ (len(annotated_classes), len(self.classes)), dtype=np.bool_
+ )
+ for file_id, classes in enumerate(annotated_classes):
+ annotated_classes_array[file_id, classes] = True
+ prepared_data["classes-annotated"] = annotated_classes_array
+ annotated_classes.clear()
+
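The resulting `classes-annotated` matrix is what `prepare_chunk` later uses to distinguish a true negative (class annotated for the file but inactive, target 0) from an unknown (class never annotated for the file, target -1). A toy version of the same construction:

    import numpy as np

    classes = ["speech", "music", "noise"]
    annotated_classes = [[0, 1], [0, 2]]  # per-file indices of annotated classes

    mask = np.zeros((len(annotated_classes), len(classes)), dtype=np.bool_)
    for file_id, file_classes in enumerate(annotated_classes):
        mask[file_id, file_classes] = True

    y = -np.ones((4, len(classes)), dtype=np.int8)  # 4 frames, all unknown (-1)
    y[:, mask[0]] = 0  # file 0: 'speech' and 'music' default to negatives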
+ def setup(self, stage=None):
+ super().setup(stage)
self.specifications = Specifications(
- classes=self.classes,
+ classes=self.prepared_data["classes-list"],
problem=Problem.MULTI_LABEL_CLASSIFICATION,
resolution=Resolution.FRAME,
duration=self.duration,
@@ -169,7 +282,9 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float):
sample = dict()
sample["X"], _ = self.model.audio.crop(file, chunk, duration=duration)
# gather all annotations of current file
- annotations = self.annotations[self.annotations["file_id"] == file_id]
+ annotations = self.prepared_data["annotations-segments"][
+ self.prepared_data["annotations-segments"]["file_id"] == file_id
+ ]
# gather all annotations with non-empty intersection with current chunk
chunk_annotations = annotations[
@@ -184,9 +299,13 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float):
# frame-level targets (-1 for un-annotated classes)
y = -np.ones(
- (self.model.example_output.num_frames, len(self.classes)), dtype=np.int8
+ (
+ self.model.example_output.num_frames,
+ len(self.prepared_data["classes-list"]),
+ ),
+ dtype=np.int8,
)
- y[:, self.annotated_classes[file_id]] = 0
+ y[:, self.prepared_data["classes-annotated"][file_id]] = 0
for start, end, label in zip(
start_idx, end_idx, chunk_annotations["global_label_idx"]
):
@@ -196,7 +315,7 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float):
y, self.model.example_output.frames, labels=self.classes
)
- metadata = self.metadata[file_id]
+ metadata = self.prepared_data["audio-metadata"][file_id]
sample["meta"] = {key: metadata[key] for key in metadata.dtype.names}
sample["meta"]["file"] = file_id
diff --git a/pyannote/audio/tasks/segmentation/overlapped_speech_detection.py b/pyannote/audio/tasks/segmentation/overlapped_speech_detection.py
index 0b7209c5c..7249ed0f4 100644
--- a/pyannote/audio/tasks/segmentation/overlapped_speech_detection.py
+++ b/pyannote/audio/tasks/segmentation/overlapped_speech_detection.py
@@ -21,7 +21,7 @@
# SOFTWARE.
-from typing import Dict, Sequence, Text, Tuple, Union
+from typing import Dict, Optional, Sequence, Text, Tuple, Union
import numpy as np
from pyannote.core import Segment, SlidingWindowFeature
@@ -29,11 +29,11 @@
from torch_audiomentations.core.transforms_interface import BaseWaveformTransform
from torchmetrics import Metric
-from pyannote.audio.core.task import Problem, Resolution, Specifications, Task
-from pyannote.audio.tasks.segmentation.mixins import SegmentationTaskMixin
+from pyannote.audio.core.task import Problem, Resolution, Specifications
+from pyannote.audio.tasks.segmentation.mixins import SegmentationTask
-class OverlappedSpeechDetection(SegmentationTaskMixin, Task):
+class OverlappedSpeechDetection(SegmentationTask):
"""Overlapped speech detection
Overlapped speech detection is the task of detecting regions where at least
@@ -51,6 +51,13 @@ class OverlappedSpeechDetection(SegmentationTaskMixin, Task):
----------
protocol : Protocol
pyannote.database protocol
+ cache : str, optional
+ As (meta-)data preparation might take a very long time for large datasets,
+ it can be cached to disk for later (and faster!) re-use.
+ When `cache` does not exist, `Task.prepare_data()` generates training
+ and validation metadata from `protocol` and saves them to disk.
+ When `cache` exists, `Task.prepare_data()` is skipped and (meta-)data
+ are loaded from disk. Defaults to a temporary path.
duration : float, optional
Chunks duration. Defaults to 2s.
warm_up : float or (float, float), optional
@@ -105,6 +112,7 @@ def __init__(
pin_memory: bool = False,
augmentation: BaseWaveformTransform = None,
metric: Union[Metric, Sequence[Metric], Dict[str, Metric]] = None,
+ cache: Optional[str] = None,
):
super().__init__(
protocol,
@@ -115,6 +123,7 @@ def __init__(
pin_memory=pin_memory,
augmentation=augmentation,
metric=metric,
+ cache=cache,
)
self.specifications = Specifications(
@@ -163,7 +172,9 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float):
sample["X"], _ = self.model.audio.crop(file, chunk, duration=duration)
# gather all annotations of current file
- annotations = self.annotations[self.annotations["file_id"] == file_id]
+ annotations = self.prepared_data["annotations-segments"][
+ self.prepared_data["annotations-segments"]["file_id"] == file_id
+ ]
# gather all annotations with non-empty intersection with current chunk
chunk_annotations = annotations[
@@ -186,7 +197,7 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float):
y, self.model.example_output.frames, labels=["speech"]
)
- metadata = self.metadata[file_id]
+ metadata = self.prepared_data["audio-metadata"][file_id]
sample["meta"] = {key: metadata[key] for key in metadata.dtype.names}
sample["meta"]["file"] = file_id
diff --git a/pyannote/audio/tasks/segmentation/speaker_diarization.py b/pyannote/audio/tasks/segmentation/speaker_diarization.py
index 1094672ed..47c5adc63 100644
--- a/pyannote/audio/tasks/segmentation/speaker_diarization.py
+++ b/pyannote/audio/tasks/segmentation/speaker_diarization.py
@@ -23,7 +23,7 @@
import math
import warnings
from collections import Counter
-from typing import Dict, Literal, Sequence, Text, Tuple, Union
+from typing import Dict, Literal, Optional, Sequence, Text, Tuple, Union
import numpy as np
import torch
@@ -37,8 +37,8 @@
from torch_audiomentations.core.transforms_interface import BaseWaveformTransform
from torchmetrics import Metric
-from pyannote.audio.core.task import Problem, Resolution, Specifications, Task
-from pyannote.audio.tasks.segmentation.mixins import SegmentationTaskMixin
+from pyannote.audio.core.task import Problem, Resolution, Specifications
+from pyannote.audio.tasks.segmentation.mixins import SegmentationTask
from pyannote.audio.torchmetrics import (
DiarizationErrorRate,
FalseAlarmRate,
@@ -58,13 +58,20 @@
Scopes = list(Scope.__args__)
-class SpeakerDiarization(SegmentationTaskMixin, Task):
+class SpeakerDiarization(SegmentationTask):
"""Speaker diarization
Parameters
----------
protocol : SpeakerDiarizationProtocol
pyannote.database protocol
+ cache : str, optional
+ As (meta-)data preparation might take a very long time for large datasets,
+ it can be cached to disk for later (and faster!) re-use.
+ When `cache` does not exist, `Task.prepare_data()` generates training
+ and validation metadata from `protocol` and saves them to disk.
+ When `cache` exists, `Task.prepare_data()` is skipped and (meta-)data
+ are loaded from disk. Defaults to a temporary path.
duration : float, optional
Chunks duration. Defaults to 2s.
max_speakers_per_chunk : int, optional
@@ -127,6 +134,7 @@ class SpeakerDiarization(SegmentationTaskMixin, Task):
def __init__(
self,
protocol: SpeakerDiarizationProtocol,
+ cache: Optional[str] = None,
duration: float = 2.0,
max_speakers_per_chunk: int = None,
max_speakers_per_frame: int = None,
@@ -152,6 +160,7 @@ def __init__(
pin_memory=pin_memory,
augmentation=augmentation,
metric=metric,
+ cache=cache,
)
if not isinstance(protocol, SpeakerDiarizationProtocol):
@@ -186,28 +195,34 @@ def __init__(
self.weight = weight
self.vad_loss = vad_loss
- def setup(self):
- super().setup()
+ def setup(self, stage=None):
+ super().setup(stage)
# estimate maximum number of speakers per chunk when not provided
if self.max_speakers_per_chunk is None:
- training = self.metadata["subset"] == Subsets.index("train")
+ training = self.prepared_data["audio-metadata"]["subset"] == Subsets.index(
+ "train"
+ )
num_unique_speakers = []
progress_description = f"Estimating maximum number of speakers per {self.duration:g}s chunk in the training set"
for file_id in track(
np.where(training)[0], description=progress_description
):
- annotations = self.annotations[
- np.where(self.annotations["file_id"] == file_id)[0]
+ annotations = self.prepared_data["annotations-segments"][
+ np.where(
+ self.prepared_data["annotations-segments"]["file_id"] == file_id
+ )[0]
]
- annotated_regions = self.annotated_regions[
- np.where(self.annotated_regions["file_id"] == file_id)[0]
+ annotated_regions = self.prepared_data["annotations-regions"][
+ np.where(
+ self.prepared_data["annotations-regions"]["file_id"] == file_id
+ )[0]
]
for region in annotated_regions:
# find annotations within current region
region_start = region["start"]
- region_end = region["end"]
+ region_end = region["start"] + region["duration"]
region_annotations = annotations[
np.where(
(annotations["start"] >= region_start)
@@ -318,7 +333,7 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float):
file = self.get_file(file_id)
# get label scope
- label_scope = Scopes[self.metadata[file_id]["scope"]]
+ label_scope = Scopes[self.prepared_data["audio-metadata"][file_id]["scope"]]
label_scope_key = f"{label_scope}_label_idx"
#
@@ -328,7 +343,9 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float):
sample["X"], _ = self.model.audio.crop(file, chunk, duration=duration)
# gather all annotations of current file
- annotations = self.annotations[self.annotations["file_id"] == file_id]
+ annotations = self.prepared_data["annotations-segments"][
+ self.prepared_data["annotations-segments"]["file_id"] == file_id
+ ]
# gather all annotations with non-empty intersection with current chunk
chunk_annotations = annotations[
@@ -364,7 +381,7 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float):
y, self.model.example_output.frames, labels=labels
)
- metadata = self.metadata[file_id]
+ metadata = self.prepared_data["audio-metadata"][file_id]
sample["meta"] = {key: metadata[key] for key in metadata.dtype.names}
sample["meta"]["file"] = file_id
diff --git a/pyannote/audio/tasks/segmentation/voice_activity_detection.py b/pyannote/audio/tasks/segmentation/voice_activity_detection.py
index fd9eb8e75..183fa2ffc 100644
--- a/pyannote/audio/tasks/segmentation/voice_activity_detection.py
+++ b/pyannote/audio/tasks/segmentation/voice_activity_detection.py
@@ -20,7 +20,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-from typing import Dict, Sequence, Text, Tuple, Union
+from typing import Dict, Optional, Sequence, Text, Tuple, Union
import numpy as np
from pyannote.core import Segment, SlidingWindowFeature
@@ -28,11 +28,11 @@
from torch_audiomentations.core.transforms_interface import BaseWaveformTransform
from torchmetrics import Metric
-from pyannote.audio.core.task import Problem, Resolution, Specifications, Task
-from pyannote.audio.tasks.segmentation.mixins import SegmentationTaskMixin
+from pyannote.audio.core.task import Problem, Resolution, Specifications
+from pyannote.audio.tasks.segmentation.mixins import SegmentationTask
-class VoiceActivityDetection(SegmentationTaskMixin, Task):
+class VoiceActivityDetection(SegmentationTask):
"""Voice activity detection
Voice activity detection (or VAD) is the task of detecting speech regions
@@ -45,6 +45,13 @@ class VoiceActivityDetection(SegmentationTaskMixin, Task):
----------
protocol : Protocol
pyannote.database protocol
+ cache : str, optional
+ As (meta-)data preparation might take a very long time for large datasets,
+ it can be cached to disk for later (and faster!) re-use.
+ When `cache` does not exist, `Task.prepare_data()` generates training
+ and validation metadata from `protocol` and saves them to disk.
+ When `cache` exists, `Task.prepare_data()` is skipped and (meta-)data
+ are loaded from disk. Defaults to a temporary path.
duration : float, optional
Chunks duration. Defaults to 2s.
warm_up : float or (float, float), optional
@@ -79,6 +86,7 @@ class VoiceActivityDetection(SegmentationTaskMixin, Task):
def __init__(
self,
protocol: Protocol,
+ cache: Optional[str] = None,
duration: float = 2.0,
warm_up: Union[float, Tuple[float, float]] = 0.0,
balance: Sequence[Text] = None,
@@ -98,6 +106,7 @@ def __init__(
pin_memory=pin_memory,
augmentation=augmentation,
metric=metric,
+ cache=cache,
)
self.balance = balance
@@ -145,7 +154,9 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float):
sample["X"], _ = self.model.audio.crop(file, chunk, duration=duration)
# gather all annotations of current file
- annotations = self.annotations[self.annotations["file_id"] == file_id]
+ annotations = self.prepared_data["annotations-segments"][
+ self.prepared_data["annotations-segments"]["file_id"] == file_id
+ ]
# gather all annotations with non-empty intersection with current chunk
chunk_annotations = annotations[
@@ -167,7 +178,7 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float):
y, self.model.example_output.frames, labels=["speech"]
)
- metadata = self.metadata[file_id]
+ metadata = self.prepared_data["audio-metadata"][file_id]
sample["meta"] = {key: metadata[key] for key in metadata.dtype.names}
sample["meta"]["file"] = file_id
diff --git a/tests/tasks/test_reproducibility.py b/tests/tasks/test_reproducibility.py
index a7307e0cf..88f2b2d8a 100644
--- a/tests/tasks/test_reproducibility.py
+++ b/tests/tasks/test_reproducibility.py
@@ -3,7 +3,7 @@
from pyannote.database import FileFinder, get_protocol
from pyannote.audio.models.segmentation.debug import SimpleSegmentationModel
-from pyannote.audio.tasks import MultiLabelSegmentation, VoiceActivityDetection
+from pyannote.audio.tasks import VoiceActivityDetection
def setup_tasks(task):
@@ -16,7 +16,8 @@ def setup_tasks(task):
def create_dl(model, task):
m = model(task=task)
- m.setup("fit")
+ m.prepare_data()
+ m.setup()
return task.train_dataloader()
@@ -31,35 +32,32 @@ def get_next5(dl):
def test_seeding_ensures_data_loaders():
"Setting a global seed for the dataloaders ensures that we get data back in the same order"
- for task in [VoiceActivityDetection, MultiLabelSegmentation]:
+ seed_everything(1)
+ protocol, vad = setup_tasks(VoiceActivityDetection)
+ dl = create_dl(SimpleSegmentationModel, vad)
+ last5a = get_next5(dl)
- seed_everything(1)
- protocol, vad = setup_tasks(task)
- dl = create_dl(SimpleSegmentationModel, vad)
- last5a = get_next5(dl)
+ seed_everything(1)
+ protocol, vad = setup_tasks(VoiceActivityDetection)
+ dl = create_dl(SimpleSegmentationModel, vad)
+ last5b = get_next5(dl)
- seed_everything(1)
- protocol, vad = setup_tasks(task)
- dl = create_dl(SimpleSegmentationModel, vad)
- last5b = get_next5(dl)
-
- for i in range(len(last5b)):
- assert torch.equal(last5a[i]["X"], last5b[i]["X"])
+ for i in range(len(last5b)):
+ assert torch.equal(last5a[i]["X"], last5b[i]["X"])
def test_different_seeds():
"Changing the global seed will change the order of the data that loads"
- for task in [VoiceActivityDetection, MultiLabelSegmentation]:
- protocol, vad = setup_tasks(task)
- seed_everything(4)
- dl = create_dl(SimpleSegmentationModel, vad)
- last5a = get_next5(dl)
+ protocol, vad = setup_tasks(VoiceActivityDetection)
+ seed_everything(4)
+ dl = create_dl(SimpleSegmentationModel, vad)
+ last5a = get_next5(dl)
- protocol, vad = setup_tasks(task)
- seed_everything(5)
- dl = create_dl(SimpleSegmentationModel, vad)
- last5b = get_next5(dl)
+ protocol, vad = setup_tasks(VoiceActivityDetection)
+ seed_everything(5)
+ dl = create_dl(SimpleSegmentationModel, vad)
+ last5b = get_next5(dl)
- for i in range(5):
- assert not torch.equal(last5a[i]["X"], last5b[i]["X"])
+ for i in range(5):
+ assert not torch.equal(last5a[i]["X"], last5b[i]["X"])
diff --git a/tests/test_train.py b/tests/test_train.py
index 7a7bfe338..6a7a6c69b 100644
--- a/tests/test_train.py
+++ b/tests/test_train.py
@@ -1,11 +1,39 @@
+# The MIT License (MIT)
+#
+# Copyright (c) 2024- CNRS
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+
+from tempfile import mkstemp
+
import pytest
from pyannote.database import FileFinder, get_protocol
from pytorch_lightning import Trainer
+from pyannote.audio.models.embedding.debug import SimpleEmbeddingModel
from pyannote.audio.models.segmentation.debug import SimpleSegmentationModel
from pyannote.audio.tasks import (
+ MultiLabelSegmentation,
OverlappedSpeechDetection,
SpeakerDiarization,
+ SupervisedRepresentationLearningWithArcFace,
VoiceActivityDetection,
)
@@ -17,6 +45,31 @@ def protocol():
)
+@pytest.fixture()
+def cache():
+ return mkstemp()[1]
+
+
+@pytest.fixture()
+def gender_protocol():
+ def to_gender(file):
+ annotation = file["annotation"]
+ mapping = {label: label[0] for label in annotation.labels()}
+ return annotation.rename_labels(mapping)
+
+ def classes(file):
+ return ["M", "F"]
+
+ return get_protocol(
+ "Debug.SpeakerDiarization.Debug",
+ preprocessors={
+ "audio": FileFinder(),
+ "annotation": to_gender,
+ "classes": classes,
+ },
+ )
+
+
def test_train_segmentation(protocol):
segmentation = SpeakerDiarization(protocol)
model = SimpleSegmentationModel(task=segmentation)
@@ -24,6 +77,48 @@ def test_train_segmentation(protocol):
trainer.fit(model)
+def test_train_segmentation_with_cached_data_mono_device(protocol, cache):
+ first_task = SpeakerDiarization(protocol, cache=cache)
+ first_model = SimpleSegmentationModel(task=first_task)
+ first_trainer = Trainer(fast_dev_run=True, accelerator="cpu", devices=1)
+ first_trainer.fit(first_model)
+
+ second_task = SpeakerDiarization(protocol, cache=cache)
+ second_model = SimpleSegmentationModel(task=second_task)
+ second_trainer = Trainer(fast_dev_run=True, accelerator="cpu", devices=1)
+ second_trainer.fit(second_model)
+
+
+def test_train_multilabel_segmentation(gender_protocol):
+ multilabel_segmentation = MultiLabelSegmentation(gender_protocol)
+ model = SimpleSegmentationModel(task=multilabel_segmentation)
+ trainer = Trainer(fast_dev_run=True, accelerator="cpu")
+ trainer.fit(model)
+
+
+def test_train_multilabel_segmentation_with_cached_data_mono_device(
+ gender_protocol, cache
+):
+ first_task = MultiLabelSegmentation(gender_protocol, cache=cache)
+ first_model = SimpleSegmentationModel(task=first_task)
+ first_trainer = Trainer(fast_dev_run=True, accelerator="cpu", devices=1)
+ first_trainer.fit(first_model)
+
+ second_task = MultiLabelSegmentation(gender_protocol, cache=cache)
+ second_model = SimpleSegmentationModel(task=second_task)
+ second_trainer = Trainer(fast_dev_run=True, accelerator="cpu", devices=1)
+ second_trainer.fit(second_model)
+
+
+def test_train_supervised_representation_with_arcface(protocol):
+ supervised_representation_with_arface = SupervisedRepresentationLearningWithArcFace(
+ protocol
+ )
+ model = SimpleEmbeddingModel(task=supervised_representation_with_arface)
+ trainer = Trainer(fast_dev_run=True, accelerator="cpu")
+ trainer.fit(model)
+
+
def test_train_voice_activity_detection(protocol):
voice_activity_detection = VoiceActivityDetection(protocol)
model = SimpleSegmentationModel(task=voice_activity_detection)
@@ -31,6 +126,18 @@ def test_train_voice_activity_detection(protocol):
trainer.fit(model)
+def test_train_voice_activity_detection_with_cached_data_mono_device(protocol, cache):
+ first_task = VoiceActivityDetection(protocol, cache=cache)
+ first_model = SimpleSegmentationModel(task=first_task)
+ first_trainer = Trainer(fast_dev_run=True, accelerator="cpu", devices=1)
+ first_trainer.fit(first_model)
+
+ second_task = VoiceActivityDetection(protocol, cache=cache)
+ second_model = SimpleSegmentationModel(task=second_task)
+ second_trainer = Trainer(fast_dev_run=True, accelerator="cpu", devices=1)
+ second_trainer.fit(second_model)
+
+
def test_train_overlapped_speech_detection(protocol):
overlapped_speech_detection = OverlappedSpeechDetection(protocol)
model = SimpleSegmentationModel(task=overlapped_speech_detection)
@@ -38,6 +145,20 @@ def test_train_overlapped_speech_detection(protocol):
trainer.fit(model)
+def test_train_overlapped_speech_detection_with_cached_data_mono_device(
+ protocol, cache
+):
+ first_task = OverlappedSpeechDetection(protocol, cache=cache)
+ first_model = SimpleSegmentationModel(task=first_task)
+ first_trainer = Trainer(fast_dev_run=True, accelerator="cpu", devices=1)
+ first_trainer.fit(first_model)
+
+ second_task = OverlappedSpeechDetection(protocol, cache=cache)
+ second_model = SimpleSegmentationModel(task=second_task)
+ second_trainer = Trainer(fast_dev_run=True, accelerator="cpu", devices=1)
+ second_trainer.fit(second_model)
+
+
def test_finetune_with_task_that_does_not_need_setup_for_specs(protocol):
voice_activity_detection = VoiceActivityDetection(protocol)
model = SimpleSegmentationModel(task=voice_activity_detection)
@@ -62,6 +183,18 @@ def test_finetune_with_task_that_needs_setup_for_specs(protocol):
trainer.fit(model)
+def test_finetune_with_task_that_needs_setup_for_specs_and_with_cache(protocol, cache):
+ segmentation = SpeakerDiarization(protocol, cache=cache)
+ model = SimpleSegmentationModel(task=segmentation)
+ trainer = Trainer(fast_dev_run=True, accelerator="cpu")
+ trainer.fit(model)
+
+ segmentation = SpeakerDiarization(protocol, cache=cache)
+ model.task = segmentation
+ trainer = Trainer(fast_dev_run=True, accelerator="cpu")
+ trainer.fit(model)
+
+
def test_transfer_with_task_that_does_not_need_setup_for_specs(protocol):
segmentation = SpeakerDiarization(protocol)
model = SimpleSegmentationModel(task=segmentation)
@@ -94,7 +227,22 @@ def test_finetune_freeze_with_task_that_needs_setup_for_specs(protocol):
segmentation = SpeakerDiarization(protocol)
model.task = segmentation
- model.freeze_up_to("mfcc")
+ model.freeze_by_name("mfcc")
+ trainer = Trainer(fast_dev_run=True, accelerator="cpu")
+ trainer.fit(model)
+
+
+def test_finetune_freeze_with_task_that_needs_setup_for_specs_and_with_cache(
+ protocol, cache
+):
+ segmentation = SpeakerDiarization(protocol, cache=cache)
+ model = SimpleSegmentationModel(task=segmentation)
+ trainer = Trainer(fast_dev_run=True, accelerator="cpu")
+ trainer.fit(model)
+
+ segmentation = SpeakerDiarization(protocol, cache=cache)
+ model.task = segmentation
+ model.freeze_by_name("mfcc")
trainer = Trainer(fast_dev_run=True, accelerator="cpu")
trainer.fit(model)
@@ -107,7 +255,23 @@ def test_finetune_freeze_with_task_that_does_not_need_setup_for_specs(protocol):
vad = VoiceActivityDetection(protocol)
model.task = vad
- model.freeze_up_to("mfcc")
+ model.freeze_by_name("mfcc")
+ trainer = Trainer(fast_dev_run=True, accelerator="cpu")
+ trainer.fit(model)
+
+
+def test_finetune_freeze_with_task_that_does_not_need_setup_for_specs_and_with_cache(
+ protocol,
+ cache,
+):
+ vad = VoiceActivityDetection(protocol, cache=cache)
+ model = SimpleSegmentationModel(task=vad)
+ trainer = Trainer(fast_dev_run=True, accelerator="cpu")
+ trainer.fit(model)
+
+ vad = VoiceActivityDetection(protocol, cache=cache)
+ model.task = vad
+ model.freeze_by_name("mfcc")
trainer = Trainer(fast_dev_run=True, accelerator="cpu")
trainer.fit(model)
@@ -120,7 +284,7 @@ def test_transfer_freeze_with_task_that_does_not_need_setup_for_specs(protocol):
voice_activity_detection = VoiceActivityDetection(protocol)
model.task = voice_activity_detection
- model.freeze_up_to("mfcc")
+ model.freeze_by_name("mfcc")
trainer = Trainer(fast_dev_run=True, accelerator="cpu")
trainer.fit(model)
@@ -133,6 +297,6 @@ def test_transfer_freeze_with_task_that_needs_setup_for_specs(protocol):
segmentation = SpeakerDiarization(protocol)
model.task = segmentation
- model.freeze_up_to("mfcc")
+ model.freeze_by_name("mfcc")
trainer = Trainer(fast_dev_run=True, accelerator="cpu")
trainer.fit(model)
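Editor's note: the new tests above all follow the same two-phase pattern: a first task is built with `cache=cache` so that `prepare_data()` writes its (meta-)data to disk, then a second task pointing at the same cache path trains without re-preparing anything. A minimal sketch of that pattern outside the test suite, assuming a registered pyannote.database protocol (the protocol name and cache path below are illustrative):

    from pyannote.database import FileFinder, get_protocol
    from pyannote.audio.tasks import VoiceActivityDetection

    protocol = get_protocol(
        "Debug.SpeakerDiarization.Debug", preprocessors={"audio": FileFinder()}
    )

    # first task: prepare_data() builds the metadata and saves it to `cache`
    first = VoiceActivityDetection(protocol, cache="prepared_data.cache")

    # second task: `cache` already exists, so prepare_data() loads it from disk
    second = VoiceActivityDetection(protocol, cache="prepared_data.cache")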
diff --git a/tutorials/MRE_template.ipynb b/tutorials/MRE_template.ipynb
index 7a44c1449..70ffacbc2 100644
--- a/tutorials/MRE_template.ipynb
+++ b/tutorials/MRE_template.ipynb
@@ -2217,4 +2217,4 @@
"outputs": []
}
]
-}
\ No newline at end of file
+}
diff --git a/tutorials/adapting_pretrained_pipeline.ipynb b/tutorials/adapting_pretrained_pipeline.ipynb
index 06d318809..d7096fb77 100644
--- a/tutorials/adapting_pretrained_pipeline.ipynb
+++ b/tutorials/adapting_pretrained_pipeline.ipynb
@@ -344,6 +344,7 @@
" loss=\"bce\", \n",
" vad_loss=\"bce\")\n",
"model.task = task\n",
+ "model.prepare_data()\n",
"model.setup(stage=\"fit\")"
]
},
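Editor's note: with (meta-)data preparation split out of `setup()`, fine-tuning code must now call `prepare_data()` before `setup()`, which is exactly what the cell above adds. A sketch of the updated sequence (the `task` and `model` construction happen in the preceding tutorial cells):

    model.task = task          # attach the task to the pretrained model
    model.prepare_data()       # new: build (or load cached) training metadata
    model.setup(stage="fit")   # assign prepared data and declare specifications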
diff --git a/tutorials/add_your_own_task.ipynb b/tutorials/add_your_own_task.ipynb
index 6e1575dc8..38daa73fb 100644
--- a/tutorials/add_your_own_task.ipynb
+++ b/tutorials/add_your_own_task.ipynb
@@ -147,13 +147,14 @@
"metadata": {},
"outputs": [],
"source": [
- "from typing import Optional\n",
- "import torch\n",
- "import torch.nn as nn\n",
+ "from math import ceil\n",
+ "from typing import Dict, Optional,Tuple, Union\n",
"import numpy as np\n",
- "from pyannote.core import Annotation\n",
- "from pyannote.audio import Model\n",
+ "from pyannote.core import Segment, SlidingWindow\n",
+ "from pyannote.audio.utils.random import create_rng_for_worker\n",
"from pyannote.audio.core.task import Task, Resolution\n",
+ "from pyannote.database import Protocol\n",
+ "from torchmetrics.classification import MultilabelAUROC\n",
"\n",
"# Your custom task must be a subclass of `pyannote.audio.core.task.Task`\n",
"class SoundEventDetection(Task):\n",
@@ -163,11 +164,13 @@
" self,\n",
" protocol: Protocol,\n",
" duration: float = 5.0,\n",
+ " min_duration: float = 5.0,\n",
" warm_up: Union[float, Tuple[float, float]] = 0.0,\n",
" batch_size: int = 32,\n",
" num_workers: int = None,\n",
" pin_memory: bool = False,\n",
- " augmentation: BaseWaveformTransform = None,\n",
+ " augmentation = None,\n",
+ " cache: Optional[Union[str, None]] = None,\n",
" **other_params,\n",
" ):\n",
"\n",
@@ -180,28 +183,62 @@
" num_workers=num_workers,\n",
" pin_memory=pin_memory,\n",
" augmentation=augmentation,\n",
+ " cache=cache,\n",
" )\n",
+ " \n",
+ " def prepare_data(self):\n",
+ " # this method is called to prepare data from the specified protocol. \n",
+ " # For most tasks, calling Task.prepare_data() is sufficient. If you \n",
+ " # need to prepare task-specific data, define a post_prepare_data method for your task.\n",
+ " super().prepare_data()\n",
"\n",
- " def setup(self):\n",
+ " def post_prepare_data(self, prepared_data: Dict):\n",
+ " # this method is called at the end of Task.prepare_data() \n",
+ " # to complete data preparation with task-specific data, here \n",
+ " # the list of classes and some training metadata\n",
"\n",
" # load metadata for training subset\n",
- " self.train_metadata_ = list()\n",
+ " prepared_data[\"train_metadata\"] = list()\n",
" for training_file in self.protocol.train():\n",
- " self.training_metadata_.append({\n",
+ " prepared_data[\"train_metadata\"].append({\n",
" # path to audio file (str)\n",
" \"audio\": training_file[\"audio\"],\n",
" # duration of audio file (float)\n",
- " \"duration\": training_file[\"duration\"],\n",
+ " \"duration\": training_file[\"torchaudio.info\"].num_frames / training_file[\"torchaudio.info\"].sample_rate,\n",
" # reference annotation (pyannote.core.Annotation)\n",
" \"annotation\": training_file[\"annotation\"],\n",
" })\n",
"\n",
" # gather the list of classes\n",
" classes = set()\n",
- " for training_file in self.train_metadata_:\n",
- " classes.update(training_file[\"reference\"].labels())\n",
- " classes = sorted(classes)\n",
+ " for training_file in prepared_data[\"train_metadata\"]:\n",
+ " classes.update(training_file[\"annotation\"].labels())\n",
+ " prepared_data[\"classes\"] = sorted(classes)\n",
+ "\n",
+ " # `has_validation` is True if protocol defines a development set\n",
+ " if not self.has_validation:\n",
+ " return\n",
+ " \n",
+ " def prepare_validation(self, prepared_data : Dict):\n",
+ " # this method is called at the end of Task.prepare_data(), to complete data preparation\n",
+ " # with task validation elements\n",
+ " \n",
+ " # load metadata for validation subset\n",
+ " prepared_data[\"validation\"] = list()\n",
+ " for validation_file in self.protocol.development():\n",
+ " prepared_data[\"validation\"].append({\n",
+ " \"audio\": validation_file[\"audio\"],\n",
+ " \"num_samples\": validation_file[\"torchaudio.info\"].num_frames,\n",
+ " \"annotation\": validation_file[\"annotation\"],\n",
+ " })\n",
+ " \n",
+ " \n",
+ " def setup(self, stage: Optional[Union[str, None]] = None):\n",
+ " # this method assigns prepared data from task.prepare_data() to the task\n",
+ " # and declares the task specifications\n",
"\n",
+ " super().setup(stage)\n",
+ " \n",
" # specify the addressed problem\n",
" self.specifications = Specifications(\n",
" # it is a multi-label classification problem\n",
@@ -212,22 +249,13 @@
" # the model will ingest chunks with that duration (in seconds)\n",
" duration=self.duration,\n",
" # human-readable names of classes\n",
- " classes=classes)\n",
- "\n",
- " # `has_validation` is True iff protocol defines a development set\n",
- " if not self.has_validation:\n",
- " return\n",
- "\n",
- " # load metadata for validation subset\n",
- " self.validation_metadata_ = list()\n",
- " for validation_file in self.protocol.development():\n",
- " self.validation_metadata_.append({\n",
- " \"audio\": validation_file[\"audio\"],\n",
- " \"num_samples\": math.floor(validation_file[\"duration\"] / self.duration),\n",
- " \"annotation\": validation_file[\"annotation\"],\n",
- " })\n",
- " \n",
- " \n",
+ " classes=self.prepared_data[\"classes\"])\n",
+ " \n",
+ " def default_metric(self):\n",
+ " # this method defines the default metrics used to evaluate the model during\n",
+ " # a training\n",
+ " num_classes = len(self.specifications.classes)\n",
+ " return MultilabelAUROC(num_classes, average=\"macro\", compute_on_cpu=True)\n",
"\n",
" def train__iter__(self):\n",
" # this method generates training samples, one at a time, \"ad infinitum\". each worker \n",
@@ -246,7 +274,7 @@
" while True:\n",
"\n",
" # select training file at random\n",
- " random_training_file, *_ = rng.choices(self.train_metadata_, k=1)\n",
+ " random_training_file, *_ = rng.choices(self.prepared_data[\"train_metadata\"], k=1)\n",
"\n",
" # select one chunk at random \n",
" random_start_time = rng.uniform(0, random_training_file[\"duration\"] - self.duration)\n",
@@ -275,8 +303,8 @@
" # we compute this number as the total duration of the training set divided by \n",
" # duration of training chunks. we make sure that an epoch is at least one batch long,\n",
" # or pytorch-lightning will complain\n",
- " train_duration = sum(training_file[\"duration\"] for training_file in self.train_metadata_)\n",
- " return max(self.batch_size, math.ceil(train_duration / self.duration))\n",
+ " train_duration = sum(training_file[\"duration\"] for training_file in self.prepared_data[\"train_metadata\"])\n",
+ " return max(self.batch_size, ceil(train_duration / self.duration))\n",
"\n",
" def val__getitem__(self, sample_idx):\n",
"\n",
@@ -287,9 +315,9 @@
"\n",
" # find which part of the validation set corresponds to sample_idx\n",
" num_samples = np.cumsum([\n",
- " validation_file[\"num_samples\"] for validation_file in self.validation_metadata_])\n",
+ " validation_file[\"num_samples\"] for validation_file in self.prepared_data[\"validation\"]])\n",
" file_idx = np.where(num_samples < sample_idx)[0][0]\n",
- " validation_file = self.validation_metadata_[file_idx]\n",
+ " validation_file = self.prepared_data[\"validation\"][file_idx]\n",
" idx = sample_idx - (num_samples[file_idx] - validation_file[\"num_samples\"]) \n",
" chunk = SlidingWindow(start=0., duration=self.duration, step=self.duration)[idx]\n",
"\n",
@@ -299,7 +327,7 @@
" # load labels corresponding to random chunk as {0|1} numpy array\n",
" # y[k] = 1 means that kth class is active\n",
" y = np.zeros((num_classes,))\n",
- " active_classes = validaiton_file[\"annotation\"].crop(chunk).labels()\n",
+ " active_classes = validation_file[\"annotation\"].crop(chunk).labels()\n",
" for active_class in active_classes:\n",
" y[classes.index(active_class)] = 1\n",
"\n",
@@ -307,7 +335,7 @@
"\n",
" def val__len__(self):\n",
" return sum(validation_file[\"num_samples\"] \n",
- " for validation_file in self.validation_metadata_)\n",
+ " for validation_file in self.prepared_data[\"validation\"])\n",
"\n",
" # `pyannote.audio.core.task.Task` base class provides a `LightningModule.training_step` and \n",
" # `LightningModule.validation_step` methods that rely on self.specifications to guess which \n",
@@ -323,14 +351,16 @@
"\n",
" # pyannote.audio.tasks.segmentation.mixin also provides a convenient mixin\n",
" # for \"segmentation\" tasks (ie. with Resolution.FRAME) that already defines\n",
- " # a bunch of useful methods. \n"
+ " # a bunch of useful methods. You can use it by inheriting your task from the \n",
+ " # pyannote.audio.tasks.segmentation.mixinSegmentationTask\n"
]
}
],
"metadata": {
"kernelspec": {
- "display_name": "Python 3.8.5 64-bit ('pyannote-audio-v2': conda)",
- "name": "python385jvsc74a57bd0af55542e943232842f746a64555e4e006c72c98a3a863e85e6cbaf12772fa219"
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -342,7 +372,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.8.5"
+ "version": "3.10.13"
}
},
"nbformat": 4,
From b41b176e3ff28e4c9daeaf7de6e4f80133e44dc9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Cl=C3=A9ment=20Pag=C3=A9s?=
<55240756+clement-pages@users.noreply.github.com>
Date: Mon, 15 Jan 2024 13:11:21 +0100
Subject: [PATCH 40/57] fix: fix support for non-ASCII characters
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Fixes #1608 #1612
Co-authored-by: Hervé BREDIN
---
pyannote/audio/core/task.py | 12 ++++++------
pyannote/audio/tasks/segmentation/mixins.py | 5 ++---
.../audio/tasks/segmentation/multilabel.py | 10 +++++-----
tests/data/database.yml | 1 +
tests/data/debug.train.lst | 2 +-
tests/data/debug.train.rttm | 18 +++++++++---------
.../data/tr\303\26100.wav" | Bin
7 files changed, 24 insertions(+), 24 deletions(-)
rename tests/data/trn00.wav => "tests/data/tr\303\26100.wav" (100%)
diff --git a/pyannote/audio/core/task.py b/pyannote/audio/core/task.py
index 5c0b16f29..a0686fc88 100644
--- a/pyannote/audio/core/task.py
+++ b/pyannote/audio/core/task.py
@@ -215,11 +215,11 @@ class Task(pl.LightningDataModule):
pyannote.database protocol
cache : str, optional
As (meta-)data preparation might take a very long time for large datasets,
- it can be cached to disk for later (and faster!) re-use.
+ it can be cached to disk for later (and faster!) re-use.
When `cache` does not exist, `Task.prepare_data()` generates training
and validation metadata from `protocol` and save them to disk.
When `cache` exists, `Task.prepare_data()` is skipped and (meta)-data
- are loaded from disk. Defaults to a temporary path.
+ are loaded from disk. Defaults to a temporary path.
duration : float, optional
Chunks duration in seconds. Defaults to two seconds (2.).
min_duration : float, optional
@@ -567,7 +567,7 @@ def prepare_data(self):
# keep track of protocol name
prepared_data["protocol"] = self.protocol.name
- prepared_data["audio-path"] = np.array(audios, dtype=np.string_)
+ prepared_data["audio-path"] = np.array(audios, dtype=np.str_)
audios.clear()
prepared_data["audio-metadata"] = np.array(metadata, dtype=metadata_dtype)
@@ -576,7 +576,7 @@ def prepare_data(self):
prepared_data["audio-info"] = np.array(audio_infos, dtype=info_dtype)
audio_infos.clear()
- prepared_data["audio-encoding"] = np.array(audio_encodings, dtype=np.string_)
+ prepared_data["audio-encoding"] = np.array(audio_encodings, dtype=np.str_)
audio_encodings.clear()
prepared_data["audio-annotated"] = np.array(annotated_duration)
@@ -596,11 +596,11 @@ def prepare_data(self):
for database, labels in database_unique_labels.items():
prepared_data[f"metadata-{database}-labels"] = np.array(
- labels, dtype=np.string_
+ labels, dtype=np.str_
)
database_unique_labels.clear()
- prepared_data["metadata-labels"] = np.array(unique_labels, dtype=np.string_)
+ prepared_data["metadata-labels"] = np.array(unique_labels, dtype=np.str_)
unique_labels.clear()
self.prepare_validation(prepared_data)
diff --git a/pyannote/audio/tasks/segmentation/mixins.py b/pyannote/audio/tasks/segmentation/mixins.py
index 1af863d89..4e97c6e9f 100644
--- a/pyannote/audio/tasks/segmentation/mixins.py
+++ b/pyannote/audio/tasks/segmentation/mixins.py
@@ -48,16 +48,15 @@ class SegmentationTask(Task):
def get_file(self, file_id):
file = dict()
- file["audio"] = str(self.prepared_data["audio-path"][file_id], encoding="utf-8")
+ file["audio"] = self.prepared_data["audio-path"][file_id]
_audio_info = self.prepared_data["audio-info"][file_id]
- _encoding = self.prepared_data["audio-encoding"][file_id]
+ encoding = self.prepared_data["audio-encoding"][file_id]
sample_rate = _audio_info["sample_rate"]
num_frames = _audio_info["num_frames"]
num_channels = _audio_info["num_channels"]
bits_per_sample = _audio_info["bits_per_sample"]
- encoding = str(_encoding, encoding="utf-8")
file["torchaudio.info"] = AudioMetaData(
sample_rate=sample_rate,
num_frames=num_frames,
diff --git a/pyannote/audio/tasks/segmentation/multilabel.py b/pyannote/audio/tasks/segmentation/multilabel.py
index c641b5c3e..66a28e7ba 100644
--- a/pyannote/audio/tasks/segmentation/multilabel.py
+++ b/pyannote/audio/tasks/segmentation/multilabel.py
@@ -49,13 +49,13 @@ class MultiLabelSegmentation(SegmentationTask):
Parameters
----------
protocol : Protocol
- cache : str, optional
+ cache : str, optional
As (meta-)data preparation might take a very long time for large datasets,
- it can be cached to disk for later (and faster!) re-use.
+ it can be cached to disk for later (and faster!) re-use.
When `cache` does not exist, `Task.prepare_data()` generates training
and validation metadata from `protocol` and save them to disk.
When `cache` exists, `Task.prepare_data()` is skipped and (meta)-data
- are loaded from disk. Defaults to a temporary path.
+ are loaded from disk. Defaults to a temporary path.
classes : List[str], optional
List of classes. Defaults to the list of classes available in the training set.
duration : float, optional
@@ -179,7 +179,7 @@ def post_prepare_data(self, prepared_data: Dict):
[classes.index(klass) for klass in file_classes]
)
- prepared_data["classes-list"] = np.array(classes, dtype=np.string_)
+ prepared_data["classes-list"] = np.array(classes, dtype=np.str_)
self.classes = classes
else:
@@ -216,7 +216,7 @@ def post_prepare_data(self, prepared_data: Dict):
]
)
- prepared_data["classes-list"] = np.array(self.classes, dtype=np.string_)
+ prepared_data["classes-list"] = np.array(self.classes, dtype=np.str_)
# convert annotated_classes (which is a list of list of classes, one list of classes per file)
# into a single (num_files x num_classes) numpy array:
diff --git a/tests/data/database.yml b/tests/data/database.yml
index 608bf40b4..10d3fb084 100644
--- a/tests/data/database.yml
+++ b/tests/data/database.yml
@@ -2,6 +2,7 @@ Protocols:
Debug:
SpeakerDiarization:
Debug:
+ scope: database
train:
uri: debug.train.lst
annotation: debug.train.rttm
diff --git a/tests/data/debug.train.lst b/tests/data/debug.train.lst
index 16be824f4..471bf03d4 100644
--- a/tests/data/debug.train.lst
+++ b/tests/data/debug.train.lst
@@ -1,4 +1,4 @@
-trn00
+trñ00
trn01
trn02
trn03
diff --git a/tests/data/debug.train.rttm b/tests/data/debug.train.rttm
index 004a3f2eb..be89e2a4c 100644
--- a/tests/data/debug.train.rttm
+++ b/tests/data/debug.train.rttm
@@ -1,26 +1,26 @@
-SPEAKER trn00 1 3.168 0.800 MEO069
-SPEAKER trn00 1 5.463 0.640 MEO069
+SPEAKER trn00 1 3.168 0.800 MÉO069
+SPEAKER trn00 1 5.463 0.640 MÉO069
SPEAKER trn00 1 5.496 0.574 MEE068
-SPEAKER trn00 1 10.454 0.499 MEO069
+SPEAKER trn00 1 10.454 0.499 MÉO069
SPEAKER trn00 1 11.040 4.592 MEE068
-SPEAKER trn00 1 16.736 1.410 MEO069
+SPEAKER trn00 1 16.736 1.410 MÉO069
SPEAKER trn00 1 16.980 2.778 MEE067
SPEAKER trn00 1 18.883 0.490 MEE068
-SPEAKER trn00 1 18.985 1.831 MEO069
+SPEAKER trn00 1 18.985 1.831 MÉO069
SPEAKER trn00 1 20.944 0.447 MEE067
SPEAKER trn00 1 21.392 4.465 MEE068
-SPEAKER trn00 1 22.928 0.384 MEO069
-SPEAKER trn00 1 25.001 2.471 MEO069
+SPEAKER trn00 1 22.928 0.384 MÉO069
+SPEAKER trn00 1 25.001 2.471 MÉO069
SPEAKER trn00 1 28.033 1.967 MEE068
SPEAKER trn01 1 2.977 0.391 FEO066
SPEAKER trn01 1 18.705 0.964 MEE068
SPEAKER trn01 1 22.269 0.457 FEO065
-SPEAKER trn01 1 28.474 1.526 MEO069
+SPEAKER trn01 1 28.474 1.526 MÉO069
SPEAKER trn01 1 28.593 1.407 FEO066
SPEAKER trn01 1 28.993 1.007 FEO065
SPEAKER trn02 1 20.704 0.688 FEO066
SPEAKER trn03 1 0.000 1.184 MEE067
-SPEAKER trn03 1 1.104 28.896 MEO069
+SPEAKER trn03 1 1.104 28.896 MÉO069
SPEAKER trn04 1 14.032 1.744 MEE076
SPEAKER trn04 1 14.345 2.471 MEO074
SPEAKER trn04 1 16.736 7.216 MEE075
diff --git a/tests/data/trn00.wav "b/tests/data/tr\303\26100.wav"
similarity index 100%
rename from tests/data/trn00.wav
rename to "tests/data/tr\303\26100.wav"
From c921f441f6f091d9822ddc03ba982c9ac4c10104 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Cl=C3=A9ment=20Pag=C3=A9s?=
<55240756+clement-pages@users.noreply.github.com>
Date: Mon, 15 Jan 2024 16:33:27 +0100
Subject: [PATCH 41/57] fix: remove use of unsigned type (#1616)
---
pyannote/audio/core/task.py | 26 +++++++--------------
pyannote/audio/tasks/segmentation/mixins.py | 2 +-
2 files changed, 9 insertions(+), 19 deletions(-)
diff --git a/pyannote/audio/core/task.py b/pyannote/audio/core/task.py
index a0686fc88..82e8939fe 100644
--- a/pyannote/audio/core/task.py
+++ b/pyannote/audio/core/task.py
@@ -160,7 +160,7 @@ def __len__(self):
return self.task.val__len__()
-def get_dtype(value: int, unsigned: Optional[bool] = False) -> str:
+def get_dtype(value: int) -> str:
"""Return the most suitable type for storing the
value passed in parameter in memory.
@@ -168,8 +168,6 @@ def get_dtype(value: int, unsigned: Optional[bool] = False) -> str:
----------
value: int
value for which to find the most suitable storage dtype
- unsigned: bool, optional
- positive integer mode only. Default to False
Returns
-------
@@ -177,21 +175,13 @@ def get_dtype(value: int, unsigned: Optional[bool] = False) -> str:
numpy formatted type
(see https://numpy.org/doc/stable/reference/arrays.dtypes.html)
"""
- if unsigned:
- if value < 0:
- raise ValueError(
- f"negative value ({value}) is incompatible with unsigned types"
- )
- # unsigned byte (8 bits), unsigned short (16 bits), unsigned int (32 bits)
- types_list = [(255, "B"), (65_535, "u2"), (4_294_967_296, "u4")]
- else:
- # signe byte (8 bits), signed short (16 bits), signed int (32 bits):
- types_list = [(127, "b"), (32_768, "i2"), (2_147_483_648, "i")]
+ # signed byte (8 bits), signed short (16 bits), signed int (32 bits):
+ types_list = [(127, "b"), (32_768, "i2"), (2_147_483_648, "i")]
filtered_list = [
(max_val, type) for max_val, type in types_list if max_val > abs(value)
]
if not filtered_list:
- return "u8" if unsigned else "i8" # unsigned or signed long (64 bits)
+ return "i8" # signed long (64 bits)
return filtered_list[0][1]
@@ -528,11 +518,11 @@ def prepare_data(self):
info_dtype = [
(
"sample_rate",
- get_dtype(max(ai[0] for ai in audio_infos), unsigned=True),
+ get_dtype(max(ai[0] for ai in audio_infos)),
),
(
"num_frames",
- get_dtype(max(ai[1] for ai in audio_infos), unsigned=True),
+ get_dtype(max(ai[1] for ai in audio_infos)),
),
("num_channels", "B"),
("bits_per_sample", "B"),
@@ -542,7 +532,7 @@ def prepare_data(self):
region_dtype = [
(
"file_id",
- get_dtype(max(ar[0] for ar in annotated_regions), unsigned=True),
+ get_dtype(max(ar[0] for ar in annotated_regions)),
),
("duration", "f"),
("start", "f"),
@@ -552,7 +542,7 @@ def prepare_data(self):
segment_dtype = [
(
"file_id",
- get_dtype(max(a[0] for a in annotations), unsigned=True),
+ get_dtype(max(a[0] for a in annotations)),
),
("start", "f"),
("end", "f"),
diff --git a/pyannote/audio/tasks/segmentation/mixins.py b/pyannote/audio/tasks/segmentation/mixins.py
index 4e97c6e9f..9c5bcd792 100644
--- a/pyannote/audio/tasks/segmentation/mixins.py
+++ b/pyannote/audio/tasks/segmentation/mixins.py
@@ -287,7 +287,7 @@ def prepare_validation(self, prepared_data: Dict):
dtype = [
(
"file_id",
- get_dtype(max(v[0] for v in validation_chunks), unsigned=True),
+ get_dtype(max(v[0] for v in validation_chunks)),
),
("start", "f"),
("duration", "f"),
From 293d8fc33eeb0887c001e1e61c2b60be0570f842 Mon Sep 17 00:00:00 2001
From: FrenchKrab <14005967+FrenchKrab@users.noreply.github.com>
Date: Wed, 24 Jan 2024 11:26:28 +0100
Subject: [PATCH 42/57] improve: add missing Optional typing
---
pyannote/audio/augmentation/mix.py | 10 +++----
pyannote/audio/cli/evaluate.py | 2 +-
.../CosineAnnealingWarmRestarts.py | 3 +-
pyannote/audio/cli/lr_schedulers/CyclicLR.py | 3 +-
pyannote/audio/core/callback.py | 4 +--
pyannote/audio/core/inference.py | 10 +++----
pyannote/audio/core/io.py | 4 ++-
pyannote/audio/core/model.py | 6 ++--
pyannote/audio/core/task.py | 6 ++--
.../models/embedding/wespeaker/__init__.py | 2 +-
.../models/embedding/wespeaker/resnet.py | 6 ++--
pyannote/audio/models/embedding/xvector.py | 8 +++---
pyannote/audio/models/segmentation/PyanNet.py | 6 ++--
.../audio/models/segmentation/SSeRiouSS.py | 4 +--
pyannote/audio/pipelines/clustering.py | 28 +++++++++----------
pyannote/audio/pipelines/multilabel.py | 2 +-
.../pipelines/overlapped_speech_detection.py | 2 +-
pyannote/audio/pipelines/resegmentation.py | 4 +--
.../audio/pipelines/speaker_diarization.py | 25 +++++++++++------
.../audio/pipelines/speaker_verification.py | 24 ++++++++--------
pyannote/audio/pipelines/utils/diarization.py | 12 ++++----
pyannote/audio/pipelines/utils/getter.py | 4 +--
pyannote/audio/pipelines/utils/oracle.py | 4 +--
.../pipelines/voice_activity_detection.py | 2 +-
pyannote/audio/tasks/embedding/arcface.py | 8 +++---
.../audio/tasks/segmentation/multilabel.py | 8 +++---
.../overlapped_speech_detection.py | 23 +++++++--------
.../tasks/segmentation/speaker_diarization.py | 28 ++++++++++---------
.../segmentation/voice_activity_detection.py | 14 +++++-----
pyannote/audio/utils/loss.py | 16 +++++++----
pyannote/audio/utils/params.py | 4 ++-
pyannote/audio/utils/preview.py | 6 ++--
tutorials/add_your_own_task.ipynb | 4 +--
33 files changed, 159 insertions(+), 133 deletions(-)
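Editor's note: the pattern in this patch is mechanical: every parameter that defaults to `None` but was annotated with a bare type gets wrapped in `Optional[...]`. Both spellings behave identically at runtime, but implicit Optional is flagged by modern type checkers. A generic before/after (not from the patch):

    from typing import Optional
    import torch

    # before: implicit Optional, rejected by strict mypy
    def forward_old(waveforms: torch.Tensor, weights: torch.Tensor = None): ...

    # after: explicit Optional, as applied across the 33 files listed above
    def forward_new(
        waveforms: torch.Tensor, weights: Optional[torch.Tensor] = None
    ): ...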
diff --git a/pyannote/audio/augmentation/mix.py b/pyannote/audio/augmentation/mix.py
index a6fff49c0..c6e811280 100644
--- a/pyannote/audio/augmentation/mix.py
+++ b/pyannote/audio/augmentation/mix.py
@@ -60,10 +60,10 @@ def __init__(
max_snr_in_db: float = 5.0,
mode: str = "per_example",
p: float = 0.5,
- p_mode: str = None,
- sample_rate: int = None,
- target_rate: int = None,
- max_num_speakers: int = None,
+ p_mode: Optional[str] = None,
+ sample_rate: Optional[int] = None,
+ target_rate: Optional[int] = None,
+ max_num_speakers: Optional[int] = None,
output_type: str = "tensor",
):
super().__init__(
@@ -80,7 +80,7 @@ def __init__(
def randomize_parameters(
self,
- samples: Tensor = None,
+ samples: Optional[Tensor] = None,
sample_rate: Optional[int] = None,
targets: Optional[Tensor] = None,
target_rate: Optional[int] = None,
diff --git a/pyannote/audio/cli/evaluate.py b/pyannote/audio/cli/evaluate.py
index a5ab682c5..d88c88635 100644
--- a/pyannote/audio/cli/evaluate.py
+++ b/pyannote/audio/cli/evaluate.py
@@ -53,7 +53,7 @@ def evaluate(cfg: DictConfig) -> Optional[float]:
main_task = progress.add_task(protocol.name, total=len(files))
file_task = progress.add_task("Processing", total=1.0)
- def progress_hook(completed: int = None, total: int = None):
+ def progress_hook(completed: Optional[int] = None, total: Optional[int] = None):
progress.update(file_task, completed=completed / total)
inference = Inference(model, device=device)
diff --git a/pyannote/audio/cli/lr_schedulers/CosineAnnealingWarmRestarts.py b/pyannote/audio/cli/lr_schedulers/CosineAnnealingWarmRestarts.py
index d8e5f4b3c..3c270eba1 100644
--- a/pyannote/audio/cli/lr_schedulers/CosineAnnealingWarmRestarts.py
+++ b/pyannote/audio/cli/lr_schedulers/CosineAnnealingWarmRestarts.py
@@ -20,6 +20,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
+from typing import Optional
from torch.optim import Optimizer
from torch.optim.lr_scheduler import (
@@ -32,7 +33,7 @@ def CosineAnnealingWarmRestarts(
min_lr: float = 1e-8,
max_lr: float = 1e-3,
patience: int = 1,
- num_batches_per_epoch: int = None,
+ num_batches_per_epoch: Optional[int] = None,
**kwargs,
):
"""Wrapper around CosineAnnealingWarmRestarts
diff --git a/pyannote/audio/cli/lr_schedulers/CyclicLR.py b/pyannote/audio/cli/lr_schedulers/CyclicLR.py
index cd7a7b730..cca4420b0 100644
--- a/pyannote/audio/cli/lr_schedulers/CyclicLR.py
+++ b/pyannote/audio/cli/lr_schedulers/CyclicLR.py
@@ -20,6 +20,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
+from typing import Optional
from torch.optim import Optimizer
from torch.optim.lr_scheduler import CyclicLR as _CyclicLR
@@ -31,7 +32,7 @@ def CyclicLR(
max_lr: float = 1e-3,
mode: str = "triangular2",
patience: int = 50,
- num_batches_per_epoch: int = None,
+ num_batches_per_epoch: Optional[int] = None,
**kwargs,
):
"""Wrapper around CyclicLR learning rate scheduler
diff --git a/pyannote/audio/core/callback.py b/pyannote/audio/core/callback.py
index 5ce522d57..0cc46845b 100644
--- a/pyannote/audio/core/callback.py
+++ b/pyannote/audio/core/callback.py
@@ -20,7 +20,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-from typing import List, Mapping, Text, Union
+from typing import List, Mapping, Optional, Text, Union
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.utilities.model_summary import ModelSummary
@@ -67,7 +67,7 @@ class GraduallyUnfreeze(Callback):
def __init__(
self,
schedule: Union[Mapping[Text, int], List[Union[List[Text], Text]]] = None,
- epochs_per_stage: int = None,
+ epochs_per_stage: Optional[int] = None,
):
super().__init__()
diff --git a/pyannote/audio/core/inference.py b/pyannote/audio/core/inference.py
index dcf21868d..2d6976f32 100644
--- a/pyannote/audio/core/inference.py
+++ b/pyannote/audio/core/inference.py
@@ -86,12 +86,12 @@ def __init__(
self,
model: Union[Model, Text, Path],
window: Text = "sliding",
- duration: float = None,
- step: float = None,
+ duration: Optional[float] = None,
+ step: Optional[float] = None,
pre_aggregation_hook: Callable[[np.ndarray], np.ndarray] = None,
skip_aggregation: bool = False,
skip_conversion: bool = False,
- device: torch.device = None,
+ device: Optional[torch.device] = None,
batch_size: int = 32,
use_auth_token: Union[Text, None] = None,
):
@@ -526,7 +526,7 @@ def __first_sample(outputs: np.ndarray, **kwargs) -> np.ndarray:
@staticmethod
def aggregate(
scores: SlidingWindowFeature,
- frames: SlidingWindow = None,
+ frames: Optional[SlidingWindow] = None,
warm_up: Tuple[float, float] = (0.0, 0.0),
epsilon: float = 1e-12,
hamming: bool = False,
@@ -702,7 +702,7 @@ def trim(
@staticmethod
def stitch(
activations: SlidingWindowFeature,
- frames: SlidingWindow = None,
+ frames: Optional[SlidingWindow] = None,
lookahead: Optional[Tuple[int, int]] = None,
cost_func: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None,
match_func: Callable[[np.ndarray, np.ndarray, float], bool] = None,
diff --git a/pyannote/audio/core/io.py b/pyannote/audio/core/io.py
index 0a44e75ea..cbb7ee828 100644
--- a/pyannote/audio/core/io.py
+++ b/pyannote/audio/core/io.py
@@ -253,7 +253,9 @@ def get_duration(self, file: AudioFile) -> float:
return frames / sample_rate
- def get_num_samples(self, duration: float, sample_rate: int = None) -> int:
+ def get_num_samples(
+ self, duration: float, sample_rate: Optional[int] = None
+ ) -> int:
"""Deterministic number of samples from duration and sample rate"""
sample_rate = sample_rate or self.sample_rate
diff --git a/pyannote/audio/core/model.py b/pyannote/audio/core/model.py
index 098c15c02..2acc1248b 100644
--- a/pyannote/audio/core/model.py
+++ b/pyannote/audio/core/model.py
@@ -196,7 +196,7 @@ def example_output(self) -> Union[Output, Tuple[Output]]:
def __example_output(
example_output: torch.Tensor,
- specifications: Specifications = None,
+ specifications: Optional[Specifications] = None,
) -> Output:
if specifications.resolution == Resolution.FRAME:
_, num_frames, dimension = example_output.shape
@@ -341,7 +341,9 @@ def default_activation(self) -> Union[nn.Module, Tuple[nn.Module]]:
Activation.
"""
- def __default_activation(specifications: Specifications = None) -> nn.Module:
+ def __default_activation(
+ specifications: Optional[Specifications] = None,
+ ) -> nn.Module:
if specifications.problem == Problem.BINARY_CLASSIFICATION:
return nn.Sigmoid()
diff --git a/pyannote/audio/core/task.py b/pyannote/audio/core/task.py
index 82e8939fe..afad62717 100644
--- a/pyannote/audio/core/task.py
+++ b/pyannote/audio/core/task.py
@@ -250,12 +250,12 @@ def __init__(
protocol: Protocol,
cache: Optional[Union[str, None]] = None,
duration: float = 2.0,
- min_duration: float = None,
+ min_duration: Optional[float] = None,
warm_up: Union[float, Tuple[float, float]] = 0.0,
batch_size: int = 32,
- num_workers: int = None,
+ num_workers: Optional[int] = None,
pin_memory: bool = False,
- augmentation: BaseWaveformTransform = None,
+ augmentation: Optional[BaseWaveformTransform] = None,
metric: Union[Metric, Sequence[Metric], Dict[str, Metric]] = None,
):
super().__init__()
diff --git a/pyannote/audio/models/embedding/wespeaker/__init__.py b/pyannote/audio/models/embedding/wespeaker/__init__.py
index 603a88c64..c504435c3 100644
--- a/pyannote/audio/models/embedding/wespeaker/__init__.py
+++ b/pyannote/audio/models/embedding/wespeaker/__init__.py
@@ -96,7 +96,7 @@ def compute_fbank(self, waveforms: torch.Tensor) -> torch.Tensor:
return features - torch.mean(features, dim=1, keepdim=True)
def forward(
- self, waveforms: torch.Tensor, weights: torch.Tensor = None
+ self, waveforms: torch.Tensor, weights: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""
diff --git a/pyannote/audio/models/embedding/wespeaker/resnet.py b/pyannote/audio/models/embedding/wespeaker/resnet.py
index 54f95fa8b..21fc98fc5 100644
--- a/pyannote/audio/models/embedding/wespeaker/resnet.py
+++ b/pyannote/audio/models/embedding/wespeaker/resnet.py
@@ -15,6 +15,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Optional
+
import torch
import torch.nn as nn
import torch.nn.functional as F
@@ -35,7 +37,7 @@ def __init__(self, in_dim=0, **kwargs):
self.in_dim = in_dim
self.stats_pool = StatsPool()
- def forward(self, features, weights: torch.Tensor = None):
+ def forward(self, features, weights: Optional[torch.Tensor] = None):
"""
Parameters
@@ -190,7 +192,7 @@ def _make_layer(self, block, planes, num_blocks, stride):
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
- def forward(self, x: torch.Tensor, weights: torch.Tensor = None):
+ def forward(self, x: torch.Tensor, weights: Optional[torch.Tensor] = None):
"""
Parameters
diff --git a/pyannote/audio/models/embedding/xvector.py b/pyannote/audio/models/embedding/xvector.py
index 975f0a991..b5a5463ce 100644
--- a/pyannote/audio/models/embedding/xvector.py
+++ b/pyannote/audio/models/embedding/xvector.py
@@ -41,7 +41,7 @@ def __init__(
self,
sample_rate: int = 16000,
num_channels: int = 1,
- mfcc: dict = None,
+ mfcc: Optional[dict] = None,
dimension: int = 512,
task: Optional[Task] = None,
):
@@ -82,7 +82,7 @@ def __init__(
self.embedding = nn.Linear(in_channel * 2, self.hparams.dimension)
def forward(
- self, waveforms: torch.Tensor, weights: torch.Tensor = None
+ self, waveforms: torch.Tensor, weights: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""
@@ -109,7 +109,7 @@ def __init__(
self,
sample_rate: int = 16000,
num_channels: int = 1,
- sincnet: dict = None,
+ sincnet: Optional[dict] = None,
dimension: int = 512,
task: Optional[Task] = None,
):
@@ -150,7 +150,7 @@ def __init__(
self.embedding = nn.Linear(in_channel * 2, self.hparams.dimension)
def forward(
- self, waveforms: torch.Tensor, weights: torch.Tensor = None
+ self, waveforms: torch.Tensor, weights: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""
diff --git a/pyannote/audio/models/segmentation/PyanNet.py b/pyannote/audio/models/segmentation/PyanNet.py
index 2c4443e06..b6cdc604b 100644
--- a/pyannote/audio/models/segmentation/PyanNet.py
+++ b/pyannote/audio/models/segmentation/PyanNet.py
@@ -74,9 +74,9 @@ class PyanNet(Model):
def __init__(
self,
- sincnet: dict = None,
- lstm: dict = None,
- linear: dict = None,
+ sincnet: Optional[dict] = None,
+ lstm: Optional[dict] = None,
+ linear: Optional[dict] = None,
sample_rate: int = 16000,
num_channels: int = 1,
task: Optional[Task] = None,
diff --git a/pyannote/audio/models/segmentation/SSeRiouSS.py b/pyannote/audio/models/segmentation/SSeRiouSS.py
index 9ba656182..45e9ddb7d 100644
--- a/pyannote/audio/models/segmentation/SSeRiouSS.py
+++ b/pyannote/audio/models/segmentation/SSeRiouSS.py
@@ -79,8 +79,8 @@ def __init__(
self,
wav2vec: Union[dict, str] = None,
wav2vec_layer: int = -1,
- lstm: dict = None,
- linear: dict = None,
+ lstm: Optional[dict] = None,
+ linear: Optional[dict] = None,
sample_rate: int = 16000,
num_channels: int = 1,
task: Optional[Task] = None,
diff --git a/pyannote/audio/pipelines/clustering.py b/pyannote/audio/pipelines/clustering.py
index 80098ea24..cd4b38935 100644
--- a/pyannote/audio/pipelines/clustering.py
+++ b/pyannote/audio/pipelines/clustering.py
@@ -25,7 +25,7 @@
import random
from enum import Enum
-from typing import Tuple
+from typing import Optional, Tuple
import numpy as np
from einops import rearrange
@@ -56,9 +56,9 @@ def __init__(
def set_num_clusters(
self,
num_embeddings: int,
- num_clusters: int = None,
- min_clusters: int = None,
- max_clusters: int = None,
+ num_clusters: Optional[int] = None,
+ min_clusters: Optional[int] = None,
+ max_clusters: Optional[int] = None,
):
min_clusters = num_clusters or min_clusters or 1
min_clusters = max(1, min(num_embeddings, min_clusters))
@@ -79,7 +79,7 @@ def set_num_clusters(
def filter_embeddings(
self,
embeddings: np.ndarray,
- segmentations: SlidingWindowFeature = None,
+ segmentations: Optional[SlidingWindowFeature] = None,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Filter NaN embeddings and downsample embeddings
@@ -205,10 +205,10 @@ def assign_embeddings(
def __call__(
self,
embeddings: np.ndarray,
- segmentations: SlidingWindowFeature = None,
- num_clusters: int = None,
- min_clusters: int = None,
- max_clusters: int = None,
+ segmentations: Optional[SlidingWindowFeature] = None,
+ num_clusters: Optional[int] = None,
+ min_clusters: Optional[int] = None,
+ max_clusters: Optional[int] = None,
**kwargs,
) -> np.ndarray:
"""Apply clustering
@@ -323,7 +323,7 @@ def cluster(
embeddings: np.ndarray,
min_clusters: int,
max_clusters: int,
- num_clusters: int = None,
+ num_clusters: Optional[int] = None,
):
"""
@@ -476,10 +476,10 @@ class OracleClustering(BaseClustering):
def __call__(
self,
- embeddings: np.ndarray = None,
- segmentations: SlidingWindowFeature = None,
- file: AudioFile = None,
- frames: SlidingWindow = None,
+ embeddings: Optional[np.ndarray] = None,
+ segmentations: Optional[SlidingWindowFeature] = None,
+ file: Optional[AudioFile] = None,
+ frames: Optional[SlidingWindow] = None,
**kwargs,
) -> np.ndarray:
"""Apply oracle clustering
diff --git a/pyannote/audio/pipelines/multilabel.py b/pyannote/audio/pipelines/multilabel.py
index 18693f14c..b35ebee7c 100644
--- a/pyannote/audio/pipelines/multilabel.py
+++ b/pyannote/audio/pipelines/multilabel.py
@@ -75,7 +75,7 @@ class MultiLabelSegmentation(Pipeline):
def __init__(
self,
- segmentation: PipelineModel = None,
+ segmentation: Optional[PipelineModel] = None,
fscore: bool = False,
share_min_duration: bool = False,
use_auth_token: Union[Text, None] = None,
diff --git a/pyannote/audio/pipelines/overlapped_speech_detection.py b/pyannote/audio/pipelines/overlapped_speech_detection.py
index 1c9790feb..66e61c949 100644
--- a/pyannote/audio/pipelines/overlapped_speech_detection.py
+++ b/pyannote/audio/pipelines/overlapped_speech_detection.py
@@ -255,7 +255,7 @@ def compute_components(
_self,
reference: Annotation,
hypothesis: Annotation,
- uem: Timeline = None,
+ uem: Optional[Timeline] = None,
**kwargs,
) -> dict:
return super().compute_components(
diff --git a/pyannote/audio/pipelines/resegmentation.py b/pyannote/audio/pipelines/resegmentation.py
index d01e5d65f..1eeade4b1 100644
--- a/pyannote/audio/pipelines/resegmentation.py
+++ b/pyannote/audio/pipelines/resegmentation.py
@@ -86,7 +86,7 @@ def __init__(
self,
segmentation: PipelineModel = "pyannote/segmentation",
diarization: Text = "diarization",
- der_variant: dict = None,
+ der_variant: Optional[dict] = None,
use_auth_token: Union[Text, None] = None,
):
super().__init__()
@@ -137,7 +137,7 @@ def classes(self):
def apply(
self,
file: AudioFile,
- diarization: Annotation = None,
+ diarization: Optional[Annotation] = None,
hook: Optional[Callable] = None,
) -> Annotation:
"""Apply speaker diarization
diff --git a/pyannote/audio/pipelines/speaker_diarization.py b/pyannote/audio/pipelines/speaker_diarization.py
index 354f6be7e..46a7188d1 100644
--- a/pyannote/audio/pipelines/speaker_diarization.py
+++ b/pyannote/audio/pipelines/speaker_diarization.py
@@ -121,7 +121,7 @@ def __init__(
clustering: str = "AgglomerativeClustering",
embedding_batch_size: int = 1,
segmentation_batch_size: int = 1,
- der_variant: dict = None,
+ der_variant: Optional[dict] = None,
use_auth_token: Union[Text, None] = None,
):
super().__init__()
@@ -428,9 +428,9 @@ def reconstruct(
def apply(
self,
file: AudioFile,
- num_speakers: int = None,
- min_speakers: int = None,
- max_speakers: int = None,
+ num_speakers: Optional[int] = None,
+ min_speakers: Optional[int] = None,
+ max_speakers: Optional[int] = None,
return_embeddings: bool = False,
hook: Optional[Callable] = None,
) -> Annotation:
@@ -538,15 +538,20 @@ def apply(
# detected number of speakers can still be out of bounds
# (specifically, lower than `min_speakers`), since there could be too few embeddings
# to make enough clusters with a given minimum cluster size.
- if num_different_speakers < min_speakers or num_different_speakers > max_speakers:
- warnings.warn(textwrap.dedent(
- f"""
+ if (
+ num_different_speakers < min_speakers
+ or num_different_speakers > max_speakers
+ ):
+ warnings.warn(
+ textwrap.dedent(
+ f"""
The detected number of speakers ({num_different_speakers}) is outside
the given bounds [{min_speakers}, {max_speakers}]. This can happen if the
given audio file is too short to contain {min_speakers} or more speakers.
Try to lower the desired minimal number of speakers.
"""
- ))
+ )
+ )
# during counting, we could possibly overcount the number of instantaneous
# speakers due to segmentation errors, so we cap the maximum instantaneous number
@@ -618,7 +623,9 @@ def apply(
# of clusters obtained from `clustering`. In this case, we append zero embeddings
# for extra speakers
if len(diarization.labels()) > centroids.shape[0]:
- centroids = np.pad(centroids, ((0, len(diarization.labels()) - centroids.shape[0]), (0, 0)))
+ centroids = np.pad(
+ centroids, ((0, len(diarization.labels()) - centroids.shape[0]), (0, 0))
+ )
# re-order centroids so that they match
# the order given by diarization.labels()
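Editor's note: the reformatted `np.pad` call above appends all-zero rows so that `centroids` ends up with one row per diarization label. A tiny standalone check of that padding spec (shapes are made up):

    import numpy as np

    centroids = np.random.randn(3, 256)   # 3 cluster centroids, 256-dim embeddings
    num_labels = 5                        # diarization ended up with 5 labels

    # ((0, extra_rows), (0, 0)): pad rows at the bottom, leave columns alone
    padded = np.pad(centroids, ((0, num_labels - centroids.shape[0]), (0, 0)))
    assert padded.shape == (5, 256)
    assert (padded[3:] == 0.0).all()      # appended centroids are all-zero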
diff --git a/pyannote/audio/pipelines/speaker_verification.py b/pyannote/audio/pipelines/speaker_verification.py
index c870ea622..fe762bd65 100644
--- a/pyannote/audio/pipelines/speaker_verification.py
+++ b/pyannote/audio/pipelines/speaker_verification.py
@@ -23,7 +23,7 @@
import warnings
from functools import cached_property
from pathlib import Path
-from typing import Text, Union
+from typing import Optional, Text, Union
import numpy as np
import torch
@@ -73,7 +73,7 @@ class NeMoPretrainedSpeakerEmbedding(BaseInference):
def __init__(
self,
embedding: Text = "nvidia/speakerverification_en_titanet_large",
- device: torch.device = None,
+ device: Optional[torch.device] = None,
):
if not NEMO_IS_AVAILABLE:
raise ImportError(
@@ -139,7 +139,7 @@ def min_num_samples(self) -> int:
return upper
def __call__(
- self, waveforms: torch.Tensor, masks: torch.Tensor = None
+ self, waveforms: torch.Tensor, masks: Optional[torch.Tensor] = None
) -> np.ndarray:
"""
@@ -238,7 +238,7 @@ class SpeechBrainPretrainedSpeakerEmbedding(BaseInference):
def __init__(
self,
embedding: Text = "speechbrain/spkrec-ecapa-voxceleb",
- device: torch.device = None,
+ device: Optional[torch.device] = None,
use_auth_token: Union[Text, None] = None,
):
if not SPEECHBRAIN_IS_AVAILABLE:
@@ -314,7 +314,7 @@ def min_num_samples(self) -> int:
return upper
def __call__(
- self, waveforms: torch.Tensor, masks: torch.Tensor = None
+ self, waveforms: torch.Tensor, masks: Optional[torch.Tensor] = None
) -> np.ndarray:
"""
@@ -414,7 +414,7 @@ class ONNXWeSpeakerPretrainedSpeakerEmbedding(BaseInference):
def __init__(
self,
embedding: Text = "hbredin/wespeaker-voxceleb-resnet34-LM",
- device: torch.device = None,
+ device: Optional[torch.device] = None,
):
if not ONNX_IS_AVAILABLE:
raise ImportError(
@@ -560,7 +560,7 @@ def compute_fbank(
return features - torch.mean(features, dim=1, keepdim=True)
def __call__(
- self, waveforms: torch.Tensor, masks: torch.Tensor = None
+ self, waveforms: torch.Tensor, masks: Optional[torch.Tensor] = None
) -> np.ndarray:
"""
@@ -645,7 +645,7 @@ class PyannoteAudioPretrainedSpeakerEmbedding(BaseInference):
def __init__(
self,
embedding: PipelineModel = "pyannote/embedding",
- device: torch.device = None,
+ device: Optional[torch.device] = None,
use_auth_token: Union[Text, None] = None,
):
super().__init__()
@@ -695,7 +695,7 @@ def min_num_samples(self) -> int:
return upper
def __call__(
- self, waveforms: torch.Tensor, masks: torch.Tensor = None
+ self, waveforms: torch.Tensor, masks: Optional[torch.Tensor] = None
) -> np.ndarray:
with torch.inference_mode():
if masks is None:
@@ -711,7 +711,7 @@ def __call__(
def PretrainedSpeakerEmbedding(
embedding: PipelineModel,
- device: torch.device = None,
+ device: Optional[torch.device] = None,
use_auth_token: Union[Text, None] = None,
):
"""Pretrained speaker embedding
@@ -801,7 +801,7 @@ class SpeakerEmbedding(Pipeline):
def __init__(
self,
embedding: PipelineModel = "pyannote/embedding",
- segmentation: PipelineModel = None,
+ segmentation: Optional[PipelineModel] = None,
use_auth_token: Union[Text, None] = None,
):
super().__init__()
@@ -848,7 +848,7 @@ def main(
protocol: str = "VoxCeleb.SpeakerVerification.VoxCeleb1",
subset: str = "test",
embedding: str = "pyannote/embedding",
- segmentation: str = None,
+ segmentation: Optional[str] = None,
):
import typer
from pyannote.database import FileFinder, get_protocol
diff --git a/pyannote/audio/pipelines/utils/diarization.py b/pyannote/audio/pipelines/utils/diarization.py
index 4a35f7049..f6797194c 100644
--- a/pyannote/audio/pipelines/utils/diarization.py
+++ b/pyannote/audio/pipelines/utils/diarization.py
@@ -20,7 +20,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-from typing import Dict, Mapping, Tuple, Union
+from typing import Dict, Mapping, Optional, Tuple, Union
import numpy as np
from pyannote.core import Annotation, SlidingWindow, SlidingWindowFeature
@@ -28,7 +28,7 @@
from pyannote.metrics.diarization import DiarizationErrorRate
from pyannote.audio.core.inference import Inference
-from pyannote.audio.utils.signal import Binarize, binarize
+from pyannote.audio.utils.signal import Binarize
# TODO: move to dedicated module
@@ -37,9 +37,9 @@ class SpeakerDiarizationMixin:
@staticmethod
def set_num_speakers(
- num_speakers: int = None,
- min_speakers: int = None,
- max_speakers: int = None,
+ num_speakers: Optional[int] = None,
+ min_speakers: Optional[int] = None,
+ max_speakers: Optional[int] = None,
):
"""Validate number of speakers
@@ -122,7 +122,7 @@ def optimal_mapping(
def speaker_count(
binarized_segmentations: SlidingWindowFeature,
warm_up: Tuple[float, float] = (0.1, 0.1),
- frames: SlidingWindow = None,
+ frames: Optional[SlidingWindow] = None,
) -> SlidingWindowFeature:
"""Estimate frame-level number of instantaneous speakers
diff --git a/pyannote/audio/pipelines/utils/getter.py b/pyannote/audio/pipelines/utils/getter.py
index 4c589ad05..51040d1c4 100644
--- a/pyannote/audio/pipelines/utils/getter.py
+++ b/pyannote/audio/pipelines/utils/getter.py
@@ -21,7 +21,7 @@
# SOFTWARE.
import itertools
-from typing import Mapping, Text, Union
+from typing import Mapping, Optional, Text, Union
import torch
from torch_audiomentations.core.transforms_interface import BaseWaveformTransform
@@ -171,7 +171,7 @@ def get_augmentation(augmentation: PipelineAugmentation) -> BaseWaveformTransfor
)
-def get_devices(needs: int = None):
+def get_devices(needs: Optional[int] = None):
"""Get devices that can be used by the pipeline
Parameters
diff --git a/pyannote/audio/pipelines/utils/oracle.py b/pyannote/audio/pipelines/utils/oracle.py
index 44b4ded61..3bf9ebc9f 100644
--- a/pyannote/audio/pipelines/utils/oracle.py
+++ b/pyannote/audio/pipelines/utils/oracle.py
@@ -20,7 +20,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-from typing import Union
+from typing import Optional, Union
import numpy as np
from pyannote.core import Annotation, Segment, SlidingWindow, SlidingWindowFeature
@@ -32,7 +32,7 @@ def oracle_segmentation(
file: AudioFile,
window: SlidingWindow,
frames: Union[SlidingWindow, float],
- num_speakers: int = None,
+ num_speakers: Optional[int] = None,
) -> SlidingWindowFeature:
"""Oracle speaker segmentation
diff --git a/pyannote/audio/pipelines/voice_activity_detection.py b/pyannote/audio/pipelines/voice_activity_detection.py
index f67489b64..39e529d89 100644
--- a/pyannote/audio/pipelines/voice_activity_detection.py
+++ b/pyannote/audio/pipelines/voice_activity_detection.py
@@ -284,7 +284,7 @@ class AdaptiveVoiceActivityDetection(Pipeline):
def __init__(
self,
segmentation: PipelineInference = "hbredin/VoiceActivityDetection-PyanNet-DIHARD",
- augmentation: PipelineAugmentation = None,
+ augmentation: Optional[PipelineAugmentation] = None,
fscore: bool = False,
):
super().__init__()
diff --git a/pyannote/audio/tasks/embedding/arcface.py b/pyannote/audio/tasks/embedding/arcface.py
index bb2cb1f6c..cb6401e2b 100644
--- a/pyannote/audio/tasks/embedding/arcface.py
+++ b/pyannote/audio/tasks/embedding/arcface.py
@@ -23,7 +23,7 @@
from __future__ import annotations
-from typing import Dict, Sequence, Union
+from typing import Dict, Optional, Sequence, Union
import pytorch_metric_learning.losses
from pyannote.database import Protocol
@@ -82,15 +82,15 @@ class SupervisedRepresentationLearningWithArcFace(
def __init__(
self,
protocol: Protocol,
- min_duration: float = None,
+ min_duration: Optional[float] = None,
duration: float = 2.0,
num_classes_per_batch: int = 32,
num_chunks_per_class: int = 1,
margin: float = 28.6,
scale: float = 64.0,
- num_workers: int = None,
+ num_workers: Optional[int] = None,
pin_memory: bool = False,
- augmentation: BaseWaveformTransform = None,
+ augmentation: Optional[BaseWaveformTransform] = None,
metric: Union[Metric, Sequence[Metric], Dict[str, Metric]] = None,
):
diff --git a/pyannote/audio/tasks/segmentation/multilabel.py b/pyannote/audio/tasks/segmentation/multilabel.py
index 66a28e7ba..0e4a4aadc 100644
--- a/pyannote/audio/tasks/segmentation/multilabel.py
+++ b/pyannote/audio/tasks/segmentation/multilabel.py
@@ -96,12 +96,12 @@ def __init__(
classes: Optional[List[str]] = None,
duration: float = 2.0,
warm_up: Union[float, Tuple[float, float]] = 0.0,
- balance: Sequence[Text] = None,
- weight: Text = None,
+ balance: Optional[Sequence[Text]] = None,
+ weight: Optional[Text] = None,
batch_size: int = 32,
- num_workers: int = None,
+ num_workers: Optional[int] = None,
pin_memory: bool = False,
- augmentation: BaseWaveformTransform = None,
+ augmentation: Optional[BaseWaveformTransform] = None,
metric: Union[Metric, Sequence[Metric], Dict[str, Metric]] = None,
):
if not isinstance(protocol, SegmentationProtocol):
diff --git a/pyannote/audio/tasks/segmentation/overlapped_speech_detection.py b/pyannote/audio/tasks/segmentation/overlapped_speech_detection.py
index 7249ed0f4..97db20032 100644
--- a/pyannote/audio/tasks/segmentation/overlapped_speech_detection.py
+++ b/pyannote/audio/tasks/segmentation/overlapped_speech_detection.py
@@ -51,13 +51,13 @@ class OverlappedSpeechDetection(SegmentationTask):
----------
protocol : Protocol
pyannote.database protocol
- cache : str, optional
+ cache : str, optional
As (meta-)data preparation might take a very long time for large datasets,
- it can be cached to disk for later (and faster!) re-use.
+ it can be cached to disk for later (and faster!) re-use.
When `cache` does not exist, `Task.prepare_data()` generates training
and validation metadata from `protocol` and save them to disk.
When `cache` exists, `Task.prepare_data()` is skipped and (meta)-data
- are loaded from disk. Defaults to a temporary path.
+ are loaded from disk. Defaults to a temporary path.
duration : float, optional
Chunks duration. Defaults to 2s.
warm_up : float or (float, float), optional
@@ -73,11 +73,12 @@ class OverlappedSpeechDetection(SegmentationTask):
overlap: dict, optional
Controls how artificial chunks with overlapping speech are generated:
- "probability" key is the probability of artificial overlapping chunks. Setting
- "probability" to 0.6 means that, on average, 40% of training chunks are "real"
- chunks, while 60% are artifical chunks made out of the (weighted) sum of two
- chunks. Defaults to 0.5.
+ "probability" to 0.6 means that, on average, 40% of training chunks are "real"
+ chunks, while 60% are artificial chunks made out of the (weighted) sum of two
+ chunks. Defaults to 0.5.
- "snr_min" and "snr_max" keys control the minimum and maximum signal-to-noise
- ratio between summed chunks, in dB. Default to 0.0 and 10.
+ ratio between summed chunks, in dB. Defaults to 0.0 and 10.
+
weight: str, optional
When provided, use this key as frame-wise weight in the loss function.
batch_size : int, optional
@@ -105,12 +106,12 @@ def __init__(
duration: float = 2.0,
warm_up: Union[float, Tuple[float, float]] = 0.0,
overlap: dict = OVERLAP_DEFAULTS,
- balance: Sequence[Text] = None,
- weight: Text = None,
+ balance: Optional[Sequence[Text]] = None,
+ weight: Optional[Text] = None,
batch_size: int = 32,
- num_workers: int = None,
+ num_workers: Optional[int] = None,
pin_memory: bool = False,
- augmentation: BaseWaveformTransform = None,
+ augmentation: Optional[BaseWaveformTransform] = None,
metric: Union[Metric, Sequence[Metric], Dict[str, Metric]] = None,
cache: Optional[Union[str, None]] = None,
):
diff --git a/pyannote/audio/tasks/segmentation/speaker_diarization.py b/pyannote/audio/tasks/segmentation/speaker_diarization.py
index 47c5adc63..fb635dc9c 100644
--- a/pyannote/audio/tasks/segmentation/speaker_diarization.py
+++ b/pyannote/audio/tasks/segmentation/speaker_diarization.py
@@ -65,13 +65,13 @@ class SpeakerDiarization(SegmentationTask):
----------
protocol : SpeakerDiarizationProtocol
pyannote.database protocol
- cache : str, optional
+ cache : str, optional
As (meta-)data preparation might take a very long time for large datasets,
- it can be cached to disk for later (and faster!) re-use.
+ it can be cached to disk for later (and faster!) re-use.
When `cache` does not exist, `Task.prepare_data()` generates training
and validation metadata from `protocol` and save them to disk.
When `cache` exists, `Task.prepare_data()` is skipped and (meta)-data
- are loaded from disk. Defaults to a temporary path.
+ are loaded from disk. Defaults to a temporary path.
duration : float, optional
Chunks duration. Defaults to 2s.
max_speakers_per_chunk : int, optional
@@ -136,19 +136,21 @@ def __init__(
protocol: SpeakerDiarizationProtocol,
cache: Optional[Union[str, None]] = None,
duration: float = 2.0,
- max_speakers_per_chunk: int = None,
- max_speakers_per_frame: int = None,
+ max_speakers_per_chunk: Optional[int] = None,
+ max_speakers_per_frame: Optional[int] = None,
weigh_by_cardinality: bool = False,
warm_up: Union[float, Tuple[float, float]] = 0.0,
- balance: Sequence[Text] = None,
- weight: Text = None,
+ balance: Optional[Sequence[Text]] = None,
+ weight: Optional[Text] = None,
batch_size: int = 32,
- num_workers: int = None,
+ num_workers: Optional[int] = None,
pin_memory: bool = False,
- augmentation: BaseWaveformTransform = None,
+ augmentation: Optional[BaseWaveformTransform] = None,
vad_loss: Literal["bce", "mse"] = None,
metric: Union[Metric, Sequence[Metric], Dict[str, Metric]] = None,
- max_num_speakers: int = None, # deprecated in favor of `max_speakers_per_chunk``
+ max_num_speakers: Optional[
+ int
+ ] = None,  # deprecated in favor of `max_speakers_per_chunk`
loss: Literal["bce", "mse"] = None, # deprecated
):
super().__init__(
@@ -437,7 +439,7 @@ def segmentation_loss(
self,
permutated_prediction: torch.Tensor,
target: torch.Tensor,
- weight: torch.Tensor = None,
+ weight: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""Permutation-invariant segmentation loss
@@ -480,7 +482,7 @@ def voice_activity_detection_loss(
self,
permutated_prediction: torch.Tensor,
target: torch.Tensor,
- weight: torch.Tensor = None,
+ weight: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""Voice activity detection loss
@@ -878,7 +880,7 @@ def main(protocol: str, subset: str = "test", model: str = "pyannote/segmentatio
main_task = progress.add_task(protocol.name, total=len(files))
file_task = progress.add_task("Processing", total=1.0)
- def progress_hook(completed: int = None, total: int = None):
+ def progress_hook(completed: Optional[int] = None, total: Optional[int] = None):
progress.update(file_task, completed=completed / total)
inference = Inference(model, device=device)
diff --git a/pyannote/audio/tasks/segmentation/voice_activity_detection.py b/pyannote/audio/tasks/segmentation/voice_activity_detection.py
index 183fa2ffc..6d0bdb98d 100644
--- a/pyannote/audio/tasks/segmentation/voice_activity_detection.py
+++ b/pyannote/audio/tasks/segmentation/voice_activity_detection.py
@@ -45,13 +45,13 @@ class VoiceActivityDetection(SegmentationTask):
----------
protocol : Protocol
pyannote.database protocol
- cache : str, optional
+ cache : str, optional
As (meta-)data preparation might take a very long time for large datasets,
- it can be cached to disk for later (and faster!) re-use.
+ it can be cached to disk for later (and faster!) re-use.
When `cache` does not exist, `Task.prepare_data()` generates training
and validation metadata from `protocol` and save them to disk.
When `cache` exists, `Task.prepare_data()` is skipped and (meta)-data
- are loaded from disk. Defaults to a temporary path.
+ are loaded from disk. Defaults to a temporary path.
duration : float, optional
Chunks duration. Defaults to 2s.
warm_up : float or (float, float), optional
@@ -89,12 +89,12 @@ def __init__(
cache: Optional[Union[str, None]] = None,
duration: float = 2.0,
warm_up: Union[float, Tuple[float, float]] = 0.0,
- balance: Sequence[Text] = None,
- weight: Text = None,
+ balance: Optional[Sequence[Text]] = None,
+ weight: Optional[Text] = None,
batch_size: int = 32,
- num_workers: int = None,
+ num_workers: Optional[int] = None,
pin_memory: bool = False,
- augmentation: BaseWaveformTransform = None,
+ augmentation: Optional[BaseWaveformTransform] = None,
metric: Union[Metric, Sequence[Metric], Dict[str, Metric]] = None,
):
super().__init__(
diff --git a/pyannote/audio/utils/loss.py b/pyannote/audio/utils/loss.py
index 2c55b26f3..55121a678 100644
--- a/pyannote/audio/utils/loss.py
+++ b/pyannote/audio/utils/loss.py
@@ -23,11 +23,13 @@
"""Frame-weighted versions of common loss functions"""
+from typing import Optional
+
import torch
import torch.nn.functional as F
-def interpolate(target: torch.Tensor, weight: torch.Tensor = None):
+def interpolate(target: torch.Tensor, weight: Optional[torch.Tensor] = None):
"""Interpolate weight to match target frame resolution
Parameters
@@ -55,7 +57,9 @@ def interpolate(target: torch.Tensor, weight: torch.Tensor = None):
def binary_cross_entropy(
- prediction: torch.Tensor, target: torch.Tensor, weight: torch.Tensor = None
+ prediction: torch.Tensor,
+ target: torch.Tensor,
+ weight: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""Frame-weighted binary cross entropy
@@ -91,7 +95,9 @@ def binary_cross_entropy(
def mse_loss(
- prediction: torch.Tensor, target: torch.Tensor, weight: torch.Tensor = None
+ prediction: torch.Tensor,
+ target: torch.Tensor,
+ weight: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""Frame-weighted mean-squared error loss
@@ -131,8 +137,8 @@ def mse_loss(
def nll_loss(
prediction: torch.Tensor,
target: torch.Tensor,
- class_weight: torch.Tensor = None,
- weight: torch.Tensor = None,
+ class_weight: Optional[torch.Tensor] = None,
+ weight: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""Frame-weighted negative log-likelihood loss
diff --git a/pyannote/audio/utils/params.py b/pyannote/audio/utils/params.py
index 685e01653..f4ed42bcc 100644
--- a/pyannote/audio/utils/params.py
+++ b/pyannote/audio/utils/params.py
@@ -1,8 +1,10 @@
# TODO - make it depth-recursive
# TODO - switch to Omegaconf maybe?
+from typing import Optional
-def merge_dict(defaults: dict, custom: dict = None):
+
+def merge_dict(defaults: dict, custom: Optional[dict] = None):
params = dict(defaults)
if custom is not None:
params.update(custom)
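As defined above, `merge_dict` copies `defaults` and overlays `custom` on top when one is given. A quick usage sketch (values invented for illustration):

    defaults = {"bidirectional": True, "hidden_size": 128}
    merge_dict(defaults, {"hidden_size": 256})
    # -> {'bidirectional': True, 'hidden_size': 256}
    merge_dict(defaults)
    # custom is None -> a plain copy: {'bidirectional': True, 'hidden_size': 128}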
diff --git a/pyannote/audio/utils/preview.py b/pyannote/audio/utils/preview.py
index fcdf4d124..1a5ace08c 100644
--- a/pyannote/audio/utils/preview.py
+++ b/pyannote/audio/utils/preview.py
@@ -47,7 +47,7 @@
MOVIEPY_INSTALLED = False
-from typing import Mapping
+from typing import Mapping, Optional
import torch
from pyannote.core import (
@@ -64,7 +64,7 @@
from pyannote.audio.utils.signal import Binarize
-def listen(audio_file: AudioFile, segment: Segment = None) -> None:
+def listen(audio_file: AudioFile, segment: Optional[Segment] = None) -> None:
"""listen to audio
Allows playing of audio files. It will play the whole thing unless
@@ -91,7 +91,7 @@ def listen(audio_file: AudioFile, segment: Segment = None) -> None:
def preview(
audio_file: AudioFile,
- segment: Segment = None,
+ segment: Optional[Segment] = None,
zoom: float = 10.0,
video_fps: int = 5,
video_ext: str = "webm",
diff --git a/tutorials/add_your_own_task.ipynb b/tutorials/add_your_own_task.ipynb
index 38daa73fb..7fe7858cb 100644
--- a/tutorials/add_your_own_task.ipynb
+++ b/tutorials/add_your_own_task.ipynb
@@ -148,7 +148,7 @@
"outputs": [],
"source": [
"from math import ceil\n",
- "from typing import Dict, Optional,Tuple, Union\n",
+ "from typing import Dict, Optional, Tuple, Union\n",
"import numpy as np\n",
"from pyannote.core import Segment, SlidingWindow\n",
"from pyannote.audio.utils.random import create_rng_for_worker\n",
@@ -167,7 +167,7 @@
" min_duration: float = 5.0,\n",
" warm_up: Union[float, Tuple[float, float]] = 0.0,\n",
" batch_size: int = 32,\n",
- " num_workers: int = None,\n",
+ " num_workers: Optional[int] = None,\n",
" pin_memory: bool = False,\n",
" augmentation = None,\n",
" cache: Optional[Union[str, None]] = None,\n",
From c0b9e79aa8063c7ddc78e7213799d0aeae9d3d10 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Herv=C3=A9=20BREDIN?=
Date: Wed, 24 Jan 2024 21:47:01 +0100
Subject: [PATCH 43/57] BREAKING(model): get rid of `Model.example_output` in
favor of `num_frames`, `receptive_field`, and `dimension` properties (#1617)
---
CHANGELOG.md | 3 +-
pyannote/audio/core/inference.py | 169 ++----------------
pyannote/audio/core/model.py | 39 ----
pyannote/audio/models/blocks/sincnet.py | 4 +
pyannote/audio/models/embedding/debug.py | 6 +-
.../models/embedding/wespeaker/__init__.py | 5 +
pyannote/audio/models/embedding/xvector.py | 12 +-
pyannote/audio/models/segmentation/PyanNet.py | 27 +--
.../audio/models/segmentation/SSeRiouSS.py | 25 +--
pyannote/audio/models/segmentation/debug.py | 20 ++-
.../pipelines/overlapped_speech_detection.py | 2 +-
pyannote/audio/pipelines/resegmentation.py | 5 +-
.../audio/pipelines/speaker_diarization.py | 7 +-
.../audio/pipelines/speaker_verification.py | 2 +-
pyannote/audio/pipelines/utils/diarization.py | 8 +-
pyannote/audio/pipelines/utils/oracle.py | 2 +-
.../audio/tasks/segmentation/multilabel.py | 21 ++-
.../overlapped_speech_detection.py | 21 ++-
.../tasks/segmentation/speaker_diarization.py | 23 ++-
.../segmentation/voice_activity_detection.py | 21 ++-
pyannote/audio/utils/frame.py | 4 +-
21 files changed, 156 insertions(+), 270 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1cff92eb4..29e5dc311 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,7 +6,7 @@
- feat(task): add option to cache task training metadata to speed up training
- feat(pipeline): add `Waveform` and `SampleRate` preprocessors
-- feat(model): add `num_frames` and `receptive_field` to segmentation models
+- feat(model): add `num_frames`, `receptive_field`, and `dimension` properties to segmentation models
### Fixes
@@ -14,6 +14,7 @@
## Breaking changes
+- BREAKING(model): get rid of `Model.example_output` in favor of `num_frames`, `receptive_field`, and `dimension` properties
- BREAKING(task): custom tasks need to be updated (see "Add your own task" tutorial)
## Version 3.1.1 (2023-12-01)
diff --git a/pyannote/audio/core/inference.py b/pyannote/audio/core/inference.py
index 2d6976f32..0c3e9b212 100644
--- a/pyannote/audio/core/inference.py
+++ b/pyannote/audio/core/inference.py
@@ -20,7 +20,6 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-import math
import warnings
from pathlib import Path
from typing import Callable, List, Optional, Text, Tuple, Union
@@ -37,7 +36,6 @@
from pyannote.audio.core.model import Model, Specifications
from pyannote.audio.core.task import Resolution
from pyannote.audio.utils.multi_task import map_with_specifications
-from pyannote.audio.utils.permutation import mae_cost_func, permutate
from pyannote.audio.utils.powerset import Powerset
from pyannote.audio.utils.reproducibility import fix_reproducibility
@@ -263,16 +261,14 @@ def slide(
_, num_samples = waveform.shape
def __frames(
- example_output, specifications: Optional[Specifications] = None
+ receptive_field, specifications: Optional[Specifications] = None
) -> SlidingWindow:
if specifications.resolution == Resolution.CHUNK:
return SlidingWindow(start=0.0, duration=self.duration, step=self.step)
- return example_output.frames
+ return receptive_field
frames: Union[SlidingWindow, Tuple[SlidingWindow]] = map_with_specifications(
- self.model.specifications,
- __frames,
- self.model.example_output,
+ self.model.specifications, __frames, self.model.receptive_field
)
# prepare complete chunks
@@ -373,7 +369,7 @@ def __aggregate(
outputs,
SlidingWindow(start=0.0, duration=self.duration, step=self.step),
),
- frames=frames,
+ frames,
warm_up=self.warm_up,
hamming=True,
missing=0.0,
@@ -526,7 +522,7 @@ def __first_sample(outputs: np.ndarray, **kwargs) -> np.ndarray:
@staticmethod
def aggregate(
scores: SlidingWindowFeature,
- frames: Optional[SlidingWindow] = None,
+ frames: SlidingWindow,
warm_up: Tuple[float, float] = (0.0, 0.0),
epsilon: float = 1e-12,
hamming: bool = False,
@@ -539,10 +535,8 @@ def aggregate(
----------
scores : SlidingWindowFeature
Raw (unaggregated) scores. Shape is (num_chunks, num_frames_per_chunk, num_classes).
- frames : SlidingWindow, optional
- Frames resolution. Defaults to estimate it automatically based on `scores` shape
- and chunk size. Providing the exact frame resolution (when known) leads to better
- temporal precision.
+ frames : SlidingWindow
+ Frames resolution.
warm_up : (float, float) tuple, optional
Left/right warm up duration (in seconds).
missing : float, optional
@@ -559,15 +553,11 @@ def aggregate(
num_chunks, num_frames_per_chunk, num_classes = scores.data.shape
chunks = scores.sliding_window
- if frames is None:
- duration = step = chunks.duration / num_frames_per_chunk
- frames = SlidingWindow(start=chunks.start, duration=duration, step=step)
- else:
- frames = SlidingWindow(
- start=chunks.start,
- duration=frames.duration,
- step=frames.step,
- )
+ frames = SlidingWindow(
+ start=chunks.start,
+ duration=frames.duration,
+ step=frames.step,
+ )
masks = 1 - np.isnan(scores)
scores.data = np.nan_to_num(scores.data, copy=True, nan=0.0)
@@ -602,6 +592,7 @@ def aggregate(
scores.sliding_window.start
+ scores.sliding_window.duration
+ (num_chunks - 1) * scores.sliding_window.step
+ + 0.5 * frames.duration
)
+ 1
)
@@ -627,7 +618,8 @@ def aggregate(
# score ~ (num_frames_per_chunk, num_classes)-shaped np.ndarray
# mask ~ (num_frames_per_chunk, num_classes)-shaped np.ndarray
- start_frame = frames.closest_frame(chunk.start)
+ start_frame = frames.closest_frame(chunk.start + 0.5 * frames.duration)
+
aggregated_output[start_frame : start_frame + num_frames_per_chunk] += (
score * mask * hamming_window * warm_up_window
)
@@ -698,134 +690,3 @@ def trim(
)
return SlidingWindowFeature(new_data, new_chunks)
-
- @staticmethod
- def stitch(
- activations: SlidingWindowFeature,
- frames: Optional[SlidingWindow] = None,
- lookahead: Optional[Tuple[int, int]] = None,
- cost_func: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None,
- match_func: Callable[[np.ndarray, np.ndarray, float], bool] = None,
- ) -> SlidingWindowFeature:
- """
-
- Parameters
- ----------
- activations : SlidingWindowFeature
- (num_chunks, num_frames, num_classes)-shaped scores.
- frames : SlidingWindow, optional
- Frames resolution. Defaults to estimate it automatically based on `activations`
- shape and chunk size. Providing the exact frame resolution (when known) leads to better
- temporal precision.
- lookahead : (int, int) tuple
- Number of past and future adjacent chunks to use for stitching.
- Defaults to (k, k) with k = chunk_duration / chunk_step - 1
- cost_func : callable
- Cost function used to find the optimal mapping between two chunks.
- Expects two (num_frames, num_classes) torch.tensor as input
- and returns cost as a (num_classes, ) torch.tensor
- Defaults to mean absolute error (utils.permutations.mae_cost_func)
- match_func : callable
- Function used to decide whether two speakers mapped by the optimal
- mapping actually are a match.
- Expects two (num_frames, ) np.ndarray and the cost (from cost_func)
- and returns a boolean. Defaults to always returning True.
- """
-
- num_chunks, num_frames, num_classes = activations.data.shape
-
- chunks: SlidingWindow = activations.sliding_window
-
- if frames is None:
- duration = step = chunks.duration / num_frames
- frames = SlidingWindow(start=chunks.start, duration=duration, step=step)
- else:
- frames = SlidingWindow(
- start=chunks.start,
- duration=frames.duration,
- step=frames.step,
- )
-
- max_lookahead = math.floor(chunks.duration / chunks.step - 1)
- if lookahead is None:
- lookahead = 2 * (max_lookahead,)
-
- assert all(L <= max_lookahead for L in lookahead)
-
- if cost_func is None:
- cost_func = mae_cost_func
-
- if match_func is None:
-
- def always_match(this: np.ndarray, that: np.ndarray, cost: float):
- return True
-
- match_func = always_match
-
- stitches = []
- for C, (chunk, activation) in enumerate(activations):
- local_stitch = np.NAN * np.zeros(
- (sum(lookahead) + 1, num_frames, num_classes)
- )
-
- for c in range(
- max(0, C - lookahead[0]), min(num_chunks, C + lookahead[1] + 1)
- ):
- # extract common temporal support
- shift = round((C - c) * num_frames * chunks.step / chunks.duration)
-
- if shift < 0:
- shift = -shift
- this_activations = activation[shift:]
- that_activations = activations[c, : num_frames - shift]
- else:
- this_activations = activation[: num_frames - shift]
- that_activations = activations[c, shift:]
-
- # find the optimal one-to-one mapping
- _, (permutation,), (cost,) = permutate(
- this_activations[np.newaxis],
- that_activations,
- cost_func=cost_func,
- return_cost=True,
- )
-
- for this, that in enumerate(permutation):
-                # only stitch under certain conditions
- matching = (c == C) or (
- match_func(
- this_activations[:, this],
- that_activations[:, that],
- cost[this, that],
- )
- )
-
- if matching:
- local_stitch[c - C + lookahead[0], :, this] = activations[
- c, :, that
- ]
-
- # TODO: do not lookahead further once a mismatch is found
-
- stitched_chunks = SlidingWindow(
- start=chunk.start - lookahead[0] * chunks.step,
- duration=chunks.duration,
- step=chunks.step,
- )
-
- local_stitch = Inference.aggregate(
- SlidingWindowFeature(local_stitch, stitched_chunks),
- frames=frames,
- hamming=True,
- )
-
- stitches.append(local_stitch.data)
-
- stitches = np.stack(stitches)
- stitched_chunks = SlidingWindow(
- start=chunks.start - lookahead[0] * chunks.step,
- duration=chunks.duration + sum(lookahead) * chunks.step,
- step=chunks.step,
- )
-
- return SlidingWindowFeature(stitches, stitched_chunks)
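Two call-site consequences follow from this file's changes: `Inference.aggregate` now requires an explicit `frames` resolution and aligns chunks on frame centers (hence the `0.5 * frames.duration` shift above), and the experimental `stitch` static method is removed outright. A migration sketch, assuming a frame-resolution segmentation model:

    # before: frames was optional and estimated from the scores' shape
    # aggregated = Inference.aggregate(scores, frames=frames, hamming=True)

    # after: pass the model's receptive field explicitly
    aggregated = Inference.aggregate(scores, model.receptive_field, hamming=True)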
diff --git a/pyannote/audio/core/model.py b/pyannote/audio/core/model.py
index 2acc1248b..27f34e7f8 100644
--- a/pyannote/audio/core/model.py
+++ b/pyannote/audio/core/model.py
@@ -25,7 +25,6 @@
import os
import warnings
from dataclasses import dataclass
-from functools import cached_property
from importlib import import_module
from pathlib import Path
from typing import Any, Dict, List, Optional, Text, Tuple, Union
@@ -46,7 +45,6 @@
from pyannote.audio.core.io import Audio
from pyannote.audio.core.task import (
Problem,
- Resolution,
Specifications,
Task,
UnknownSpecificationsError,
@@ -112,10 +110,6 @@ def task(self) -> Task:
def task(self, task: Task):
# reset (cached) properties when task changes
del self.specifications
- try:
- del self.example_output
- except AttributeError:
- pass
self._task = task
def build(self):
@@ -187,36 +181,6 @@ def __example_input_array(self, duration: Optional[float] = None) -> torch.Tenso
def example_input_array(self) -> torch.Tensor:
return self.__example_input_array()
- @cached_property
- def example_output(self) -> Union[Output, Tuple[Output]]:
- """Example output"""
- example_input_array = self.__example_input_array()
- with torch.inference_mode():
- example_output = self(example_input_array)
-
- def __example_output(
- example_output: torch.Tensor,
- specifications: Optional[Specifications] = None,
- ) -> Output:
- if specifications.resolution == Resolution.FRAME:
- _, num_frames, dimension = example_output.shape
- frame_duration = specifications.duration / num_frames
- frames = SlidingWindow(step=frame_duration, duration=frame_duration)
- else:
- _, dimension = example_output.shape
- num_frames = None
- frames = None
-
- return Output(
- num_frames=num_frames,
- dimension=dimension,
- frames=frames,
- )
-
- return map_with_specifications(
- self.specifications, __example_output, example_output
- )
-
def prepare_data(self):
self.task.prepare_data()
@@ -270,9 +234,6 @@ def setup(self, stage=None):
# setup custom validation metrics
self.task.setup_validation_metric()
- # cache for later (and to avoid later CUDA error with multiprocessing)
- _ = self.example_output
-
# list of layers after adding task-dependent layers
after = set((name, id(module)) for name, module in self.named_modules())
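Downstream code migrates away from `Model.example_output` roughly as follows (a sketch based on the call sites updated later in this patch; `duration` is an assumed chunk duration in seconds):

    # before: ran a dummy forward pass and cached the result
    # frames = model.example_output.frames
    # dimension = model.example_output.dimension
    # num_frames = model.example_output.num_frames

    # after: dedicated properties and methods, no forward pass needed
    frames = model.receptive_field    # SlidingWindow
    dimension = model.dimension       # int
    num_frames = model.num_frames(round(duration * model.hparams.sample_rate))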
diff --git a/pyannote/audio/models/blocks/sincnet.py b/pyannote/audio/models/blocks/sincnet.py
index 33e312ba5..a7657c2e9 100644
--- a/pyannote/audio/models/blocks/sincnet.py
+++ b/pyannote/audio/models/blocks/sincnet.py
@@ -24,6 +24,8 @@
# Hervé Bredin - http://herve.niderb.fr
+from functools import cached_property, lru_cache
+
import torch
import torch.nn as nn
import torch.nn.functional as F
@@ -74,6 +76,7 @@ def __init__(self, sample_rate: int = 16000, stride: int = 1):
self.pool1d.append(nn.MaxPool1d(3, stride=3, padding=0, dilation=1))
self.norm1d.append(nn.InstanceNorm1d(60, affine=True))
+ @lru_cache
def num_frames(self, num_samples: int) -> int:
"""Compute number of output frames for a given number of input samples
@@ -132,6 +135,7 @@ def receptive_field_size(self, num_frames: int = 1) -> int:
return receptive_field_size
+ @cached_property
def receptive_field(self) -> SlidingWindow:
"""Compute receptive field
diff --git a/pyannote/audio/models/embedding/debug.py b/pyannote/audio/models/embedding/debug.py
index 11b3def30..775b6550f 100644
--- a/pyannote/audio/models/embedding/debug.py
+++ b/pyannote/audio/models/embedding/debug.py
@@ -39,7 +39,6 @@ def __init__(
num_channels: int = 1,
task: Optional[Task] = None,
):
-
super().__init__(sample_rate=sample_rate, num_channels=num_channels, task=task)
self.mfcc = MFCC(
@@ -58,6 +57,11 @@ def __init__(
bidirectional=True,
)
+ @property
+ def dimension(self) -> int:
+ """Dimension of output"""
+ return 64
+
def forward(self, waveforms: torch.Tensor) -> torch.Tensor:
"""
diff --git a/pyannote/audio/models/embedding/wespeaker/__init__.py b/pyannote/audio/models/embedding/wespeaker/__init__.py
index c504435c3..8f1e62105 100644
--- a/pyannote/audio/models/embedding/wespeaker/__init__.py
+++ b/pyannote/audio/models/embedding/wespeaker/__init__.py
@@ -95,6 +95,11 @@ def compute_fbank(self, waveforms: torch.Tensor) -> torch.Tensor:
return features - torch.mean(features, dim=1, keepdim=True)
+ @property
+ def dimension(self) -> int:
+ """Dimension of output"""
+ return self.resnet.embed_dim
+
def forward(
self, waveforms: torch.Tensor, weights: Optional[torch.Tensor] = None
) -> torch.Tensor:
diff --git a/pyannote/audio/models/embedding/xvector.py b/pyannote/audio/models/embedding/xvector.py
index b5a5463ce..85610509d 100644
--- a/pyannote/audio/models/embedding/xvector.py
+++ b/pyannote/audio/models/embedding/xvector.py
@@ -34,7 +34,6 @@
class XVectorMFCC(Model):
-
MFCC_DEFAULTS = {"n_mfcc": 40, "dct_type": 2, "norm": "ortho", "log_mels": False}
def __init__(
@@ -81,6 +80,11 @@ def __init__(
self.embedding = nn.Linear(in_channel * 2, self.hparams.dimension)
+ @property
+ def dimension(self) -> int:
+ """Dimension of output"""
+ return self.hparams.dimension
+
def forward(
self, waveforms: torch.Tensor, weights: Optional[torch.Tensor] = None
) -> torch.Tensor:
@@ -102,7 +106,6 @@ def forward(
class XVectorSincNet(Model):
-
SINCNET_DEFAULTS = {"stride": 10}
def __init__(
@@ -149,6 +152,11 @@ def __init__(
self.embedding = nn.Linear(in_channel * 2, self.hparams.dimension)
+ @property
+ def dimension(self) -> int:
+ """Dimension of output"""
+ return self.hparams.dimension
+
def forward(
self, waveforms: torch.Tensor, weights: Optional[torch.Tensor] = None
) -> torch.Tensor:
diff --git a/pyannote/audio/models/segmentation/PyanNet.py b/pyannote/audio/models/segmentation/PyanNet.py
index b6cdc604b..9d397abcb 100644
--- a/pyannote/audio/models/segmentation/PyanNet.py
+++ b/pyannote/audio/models/segmentation/PyanNet.py
@@ -20,7 +20,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-
+from functools import cached_property, lru_cache
from typing import Optional
import torch
@@ -139,6 +139,17 @@ def __init__(
]
)
+ @property
+ def dimension(self) -> int:
+ """Dimension of output"""
+ if isinstance(self.specifications, tuple):
+ raise ValueError("PyanNet does not support multi-tasking.")
+
+ if self.specifications.powerset:
+ return self.specifications.num_powerset_classes
+ else:
+ return len(self.specifications.classes)
+
def build(self):
if self.hparams.linear["num_layers"] > 0:
in_features = self.hparams.linear["hidden_size"]
@@ -147,17 +158,10 @@ def build(self):
2 if self.hparams.lstm["bidirectional"] else 1
)
- if isinstance(self.specifications, tuple):
- raise ValueError("PyanNet does not support multi-tasking.")
-
- if self.specifications.powerset:
- out_features = self.specifications.num_powerset_classes
- else:
- out_features = len(self.specifications.classes)
-
- self.classifier = nn.Linear(in_features, out_features)
+ self.classifier = nn.Linear(in_features, self.dimension)
self.activation = self.default_activation()
+ @lru_cache
def num_frames(self, num_samples: int) -> int:
"""Compute number of output frames for a given number of input samples
@@ -174,6 +178,7 @@ def num_frames(self, num_samples: int) -> int:
return self.sincnet.num_frames(num_samples)
+ @cached_property
def receptive_field(self) -> SlidingWindow:
"""Compute receptive field
@@ -186,7 +191,7 @@ def receptive_field(self) -> SlidingWindow:
https://distill.pub/2019/computing-receptive-fields/
"""
- return self.sincnet.receptive_field()
+ return self.sincnet.receptive_field
def forward(self, waveforms: torch.Tensor) -> torch.Tensor:
"""Pass forward
diff --git a/pyannote/audio/models/segmentation/SSeRiouSS.py b/pyannote/audio/models/segmentation/SSeRiouSS.py
index 45e9ddb7d..514ad00de 100644
--- a/pyannote/audio/models/segmentation/SSeRiouSS.py
+++ b/pyannote/audio/models/segmentation/SSeRiouSS.py
@@ -20,7 +20,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-
+from functools import cached_property, lru_cache
from typing import Optional, Union
import torch
@@ -174,6 +174,17 @@ def __init__(
]
)
+ @property
+ def dimension(self) -> int:
+ """Dimension of output"""
+ if isinstance(self.specifications, tuple):
+ raise ValueError("SSeRiouSS does not support multi-tasking.")
+
+ if self.specifications.powerset:
+ return self.specifications.num_powerset_classes
+ else:
+ return len(self.specifications.classes)
+
def build(self):
if self.hparams.linear["num_layers"] > 0:
in_features = self.hparams.linear["hidden_size"]
@@ -182,17 +193,10 @@ def build(self):
2 if self.hparams.lstm["bidirectional"] else 1
)
- if isinstance(self.specifications, tuple):
- raise ValueError("SSeRiouSS model does not support multi-tasking.")
-
- if self.specifications.powerset:
- out_features = self.specifications.num_powerset_classes
- else:
- out_features = len(self.specifications.classes)
-
- self.classifier = nn.Linear(in_features, out_features)
+ self.classifier = nn.Linear(in_features, self.dimension)
self.activation = self.default_activation()
+ @lru_cache
def num_frames(self, num_samples: int) -> int:
"""Compute number of output frames for a given number of input samples
@@ -245,6 +249,7 @@ def receptive_field_size(self, num_frames: int = 1) -> int:
return receptive_field_size
+ @cached_property
def receptive_field(self) -> SlidingWindow:
"""Compute receptive field
diff --git a/pyannote/audio/models/segmentation/debug.py b/pyannote/audio/models/segmentation/debug.py
index a230bf768..d41dba51a 100644
--- a/pyannote/audio/models/segmentation/debug.py
+++ b/pyannote/audio/models/segmentation/debug.py
@@ -21,6 +21,7 @@
# SOFTWARE.
+from functools import cached_property, lru_cache
from typing import Optional
import torch
@@ -58,6 +59,7 @@ def __init__(
bidirectional=True,
)
+ @lru_cache
def num_frames(self, num_samples: int) -> int:
"""Compute number of output frames for a given number of input samples
@@ -80,7 +82,7 @@ def num_frames(self, num_samples: int) -> int:
hop_length = self.mfcc.MelSpectrogram.spectrogram.hop_length
n_fft = self.mfcc.MelSpectrogram.spectrogram.n_fft
center = self.mfcc.MelSpectrogram.spectrogram.center
- return (
+ return int(
1 + num_samples // hop_length
if center
else 1 + (num_samples - n_fft) // hop_length
@@ -109,6 +111,7 @@ def receptive_field_size(self, num_frames: int = 1) -> int:
else:
return (num_frames - 1) * hop_length + n_fft
+ @cached_property
def receptive_field(self) -> SlidingWindow:
"""Compute receptive field
@@ -134,18 +137,21 @@ def receptive_field(self) -> SlidingWindow:
return SlidingWindow(start=0.0, duration=duration, step=step)
- def build(self):
- # define task-dependent layers
-
+ @property
+ def dimension(self) -> int:
+ """Dimension of output"""
if isinstance(self.specifications, tuple):
raise ValueError("SimpleSegmentationModel does not support multi-tasking.")
if self.specifications.powerset:
- out_features = self.specifications.num_powerset_classes
+ return self.specifications.num_powerset_classes
else:
- out_features = len(self.specifications.classes)
+ return len(self.specifications.classes)
+
+ def build(self):
+ # define task-dependent layers
- self.classifier = nn.Linear(32 * 2, out_features)
+ self.classifier = nn.Linear(32 * 2, self.dimension)
self.activation = self.default_activation()
def forward(self, waveforms: torch.Tensor) -> torch.Tensor:
diff --git a/pyannote/audio/pipelines/overlapped_speech_detection.py b/pyannote/audio/pipelines/overlapped_speech_detection.py
index 66e61c949..1429e4299 100644
--- a/pyannote/audio/pipelines/overlapped_speech_detection.py
+++ b/pyannote/audio/pipelines/overlapped_speech_detection.py
@@ -128,7 +128,7 @@ def __init__(
# load model
model = get_model(segmentation, use_auth_token=use_auth_token)
- if model.example_output.dimension > 1:
+ if model.dimension > 1:
inference_kwargs["pre_aggregation_hook"] = lambda scores: np.partition(
scores, -2, axis=-1
)[:, :, -2, np.newaxis]
diff --git a/pyannote/audio/pipelines/resegmentation.py b/pyannote/audio/pipelines/resegmentation.py
index 1eeade4b1..85492f774 100644
--- a/pyannote/audio/pipelines/resegmentation.py
+++ b/pyannote/audio/pipelines/resegmentation.py
@@ -96,7 +96,6 @@ def __init__(
model: Model = get_model(segmentation, use_auth_token=use_auth_token)
self._segmentation = Inference(model)
- self._frames = self._segmentation.model.example_output.frames
self._audio = model.audio
@@ -193,8 +192,8 @@ def apply(
# estimate frame-level number of instantaneous speakers
count = self.speaker_count(
binarized_segmentations,
+ self._segmentation.model.receptive_field,
warm_up=(self.warm_up, self.warm_up),
- frames=self._frames,
)
hook("speaker_counting", count)
@@ -205,7 +204,7 @@ def apply(
support=Segment(
0.0, self._audio.get_duration(file) + self._segmentation.step
),
- resolution=self._frames,
+ resolution=self._segmentation.model.receptive_field,
)
hook("@resegmentation/original", diarization)
diff --git a/pyannote/audio/pipelines/speaker_diarization.py b/pyannote/audio/pipelines/speaker_diarization.py
index 46a7188d1..737cd1cb2 100644
--- a/pyannote/audio/pipelines/speaker_diarization.py
+++ b/pyannote/audio/pipelines/speaker_diarization.py
@@ -32,7 +32,7 @@
import numpy as np
import torch
from einops import rearrange
-from pyannote.core import Annotation, SlidingWindow, SlidingWindowFeature
+from pyannote.core import Annotation, SlidingWindowFeature
from pyannote.metrics.diarization import GreedyDiarizationErrorRate
from pyannote.pipeline.parameter import ParamDict, Uniform
@@ -147,7 +147,6 @@ def __init__(
skip_aggregation=True,
batch_size=segmentation_batch_size,
)
- self._frames: SlidingWindow = self._segmentation.model.example_output.frames
if self._segmentation.model.specifications.powerset:
self.segmentation = ParamDict(
@@ -493,7 +492,7 @@ def apply(
# estimate frame-level number of instantaneous speakers
count = self.speaker_count(
binarized_segmentations,
- frames=self._frames,
+ self._segmentation.model.receptive_field,
warm_up=(0.0, 0.0),
)
hook("speaker_counting", count)
@@ -527,7 +526,7 @@ def apply(
min_clusters=min_speakers,
max_clusters=max_speakers,
file=file, # <== for oracle clustering
- frames=self._frames, # <== for oracle clustering
+ frames=self._segmentation.model.receptive_field, # <== for oracle clustering
)
# hard_clusters: (num_chunks, num_speakers)
# centroids: (num_speakers, dimension)
diff --git a/pyannote/audio/pipelines/speaker_verification.py b/pyannote/audio/pipelines/speaker_verification.py
index fe762bd65..c49468a79 100644
--- a/pyannote/audio/pipelines/speaker_verification.py
+++ b/pyannote/audio/pipelines/speaker_verification.py
@@ -672,7 +672,7 @@ def sample_rate(self) -> int:
@cached_property
def dimension(self) -> int:
- return self.model_.example_output.dimension
+ return self.model_.dimension
@cached_property
def metric(self) -> str:
diff --git a/pyannote/audio/pipelines/utils/diarization.py b/pyannote/audio/pipelines/utils/diarization.py
index f6797194c..5a0f8f675 100644
--- a/pyannote/audio/pipelines/utils/diarization.py
+++ b/pyannote/audio/pipelines/utils/diarization.py
@@ -121,8 +121,8 @@ def optimal_mapping(
@staticmethod
def speaker_count(
binarized_segmentations: SlidingWindowFeature,
+ frames: SlidingWindow,
warm_up: Tuple[float, float] = (0.1, 0.1),
- frames: Optional[SlidingWindow] = None,
) -> SlidingWindowFeature:
"""Estimate frame-level number of instantaneous speakers
@@ -133,7 +133,7 @@ def speaker_count(
warm_up : (float, float) tuple, optional
Left/right warm up ratio of chunk duration.
Defaults to (0.1, 0.1), i.e. 10% on both sides.
- frames : SlidingWindow, optional
+ frames : SlidingWindow
Frames resolution. Defaults to estimate it automatically based on
`segmentations` shape and chunk size. Providing the exact frame
resolution (when known) leads to better temporal precision.
@@ -147,7 +147,7 @@ def speaker_count(
trimmed = Inference.trim(binarized_segmentations, warm_up=warm_up)
count = Inference.aggregate(
np.sum(trimmed, axis=-1, keepdims=True),
- frames=frames,
+ frames,
hamming=False,
missing=0.0,
skip_average=False,
@@ -212,7 +212,7 @@ def to_diarization(
# TODO: investigate alternative aggregation
activations = Inference.aggregate(
segmentations,
- frames=count.sliding_window,
+ count.sliding_window,
hamming=False,
missing=0.0,
skip_average=True,
diff --git a/pyannote/audio/pipelines/utils/oracle.py b/pyannote/audio/pipelines/utils/oracle.py
index 3bf9ebc9f..24401f752 100644
--- a/pyannote/audio/pipelines/utils/oracle.py
+++ b/pyannote/audio/pipelines/utils/oracle.py
@@ -39,7 +39,7 @@ def oracle_segmentation(
Simulates inference based on an (imaginary) oracle segmentation model:
>>> oracle = Model.from_pretrained("oracle")
- >>> assert frames == oracle.example_output.frames
+ >>> assert frames == oracle.receptive_field
>>> inference = Inference(oracle, duration=window.duration, step=window.step, skip_aggregation=True)
>>> oracle_segmentation = inference(file)
diff --git a/pyannote/audio/tasks/segmentation/multilabel.py b/pyannote/audio/tasks/segmentation/multilabel.py
index 0e4a4aadc..9184121c4 100644
--- a/pyannote/audio/tasks/segmentation/multilabel.py
+++ b/pyannote/audio/tasks/segmentation/multilabel.py
@@ -292,15 +292,22 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float):
]
# discretize chunk annotations at model output resolution
- start = np.maximum(chunk_annotations["start"], chunk.start) - chunk.start
- start_idx = np.floor(start / self.model.example_output.frames.step).astype(int)
- end = np.minimum(chunk_annotations["end"], chunk.end) - chunk.start
- end_idx = np.ceil(end / self.model.example_output.frames.step).astype(int)
+ step = self.model.receptive_field.step
+ half = 0.5 * self.model.receptive_field.duration
+
+ start = np.maximum(chunk_annotations["start"], chunk.start) - chunk.start - half
+ start_idx = np.maximum(0, np.round(start / step)).astype(int)
+
+ end = np.minimum(chunk_annotations["end"], chunk.end) - chunk.start - half
+ end_idx = np.round(end / step).astype(int)
# frame-level targets (-1 for un-annotated classes)
+ num_frames = self.model.num_frames(
+ round(duration * self.model.hparams.sample_rate)
+ )
y = -np.ones(
(
- self.model.example_output.num_frames,
+ num_frames,
len(self.prepared_data["classes-list"]),
),
dtype=np.int8,
@@ -309,10 +316,10 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float):
for start, end, label in zip(
start_idx, end_idx, chunk_annotations["global_label_idx"]
):
- y[start:end, label] = 1
+ y[start : end + 1, label] = 1
sample["y"] = SlidingWindowFeature(
- y, self.model.example_output.frames, labels=self.classes
+ y, self.model.receptive_field, labels=self.classes
)
metadata = self.prepared_data["audio-metadata"][file_id]
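The new discretization subtracts half the receptive-field duration so that rounding happens on frame centers, clamps the start index at 0, and labels frames inclusively (`end + 1`). Worked through with assumed numbers (a 0.064 s receptive field with 0.016 s step and an annotation spanning 0.112-0.304 s inside the chunk; none of these values come from the patch):

    step, half = 0.016, 0.5 * 0.064
    start_idx = max(0, round((0.112 - half) / step))  # 0.080 / 0.016 -> 5
    end_idx = round((0.304 - half) / step)            # 0.272 / 0.016 -> 17
    # frames 5..17 inclusive receive the label:
    # y[start_idx : end_idx + 1, label] = 1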
diff --git a/pyannote/audio/tasks/segmentation/overlapped_speech_detection.py b/pyannote/audio/tasks/segmentation/overlapped_speech_detection.py
index 97db20032..89d299a8d 100644
--- a/pyannote/audio/tasks/segmentation/overlapped_speech_detection.py
+++ b/pyannote/audio/tasks/segmentation/overlapped_speech_detection.py
@@ -183,19 +183,26 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float):
]
# discretize chunk annotations at model output resolution
- start = np.maximum(chunk_annotations["start"], chunk.start) - chunk.start
- start_idx = np.floor(start / self.model.example_output.frames.step).astype(int)
- end = np.minimum(chunk_annotations["end"], chunk.end) - chunk.start
- end_idx = np.ceil(end / self.model.example_output.frames.step).astype(int)
+ step = self.model.receptive_field.step
+ half = 0.5 * self.model.receptive_field.duration
+
+ start = np.maximum(chunk_annotations["start"], chunk.start) - chunk.start - half
+ start_idx = np.maximum(0, np.round(start / step)).astype(int)
+
+ end = np.minimum(chunk_annotations["end"], chunk.end) - chunk.start - half
+ end_idx = np.round(end / step).astype(int)
# frame-level targets
- y = np.zeros((self.model.example_output.num_frames, 1), dtype=np.uint8)
+ num_frames = self.model.num_frames(
+ round(duration * self.model.hparams.sample_rate)
+ )
+ y = np.zeros((num_frames, 1), dtype=np.uint8)
for start, end in zip(start_idx, end_idx):
- y[start:end, 0] += 1
+ y[start : end + 1, 0] += 1
y = 1 * (y > 1)
sample["y"] = SlidingWindowFeature(
- y, self.model.example_output.frames, labels=["speech"]
+ y, self.model.receptive_field, labels=["speech"]
)
metadata = self.prepared_data["audio-metadata"][file_id]
diff --git a/pyannote/audio/tasks/segmentation/speaker_diarization.py b/pyannote/audio/tasks/segmentation/speaker_diarization.py
index fb635dc9c..8a091b1f7 100644
--- a/pyannote/audio/tasks/segmentation/speaker_diarization.py
+++ b/pyannote/audio/tasks/segmentation/speaker_diarization.py
@@ -355,10 +355,14 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float):
]
# discretize chunk annotations at model output resolution
- start = np.maximum(chunk_annotations["start"], chunk.start) - chunk.start
- start_idx = np.floor(start / self.model.example_output.frames.step).astype(int)
- end = np.minimum(chunk_annotations["end"], chunk.end) - chunk.start
- end_idx = np.ceil(end / self.model.example_output.frames.step).astype(int)
+ step = self.model.receptive_field.step
+ half = 0.5 * self.model.receptive_field.duration
+
+ start = np.maximum(chunk_annotations["start"], chunk.start) - chunk.start - half
+ start_idx = np.maximum(0, np.round(start / step)).astype(int)
+
+ end = np.minimum(chunk_annotations["end"], chunk.end) - chunk.start - half
+ end_idx = np.round(end / step).astype(int)
# get list and number of labels for current scope
labels = list(np.unique(chunk_annotations[label_scope_key]))
@@ -368,7 +372,10 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float):
pass
# initial frame-level targets
- y = np.zeros((self.model.example_output.num_frames, num_labels), dtype=np.uint8)
+ num_frames = self.model.num_frames(
+ round(duration * self.model.hparams.sample_rate)
+ )
+ y = np.zeros((num_frames, num_labels), dtype=np.uint8)
# map labels to indices
mapping = {label: idx for idx, label in enumerate(labels)}
@@ -377,11 +384,9 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float):
start_idx, end_idx, chunk_annotations[label_scope_key]
):
mapped_label = mapping[label]
- y[start:end, mapped_label] = 1
+ y[start : end + 1, mapped_label] = 1
- sample["y"] = SlidingWindowFeature(
- y, self.model.example_output.frames, labels=labels
- )
+ sample["y"] = SlidingWindowFeature(y, self.model.receptive_field, labels=labels)
metadata = self.prepared_data["audio-metadata"][file_id]
sample["meta"] = {key: metadata[key] for key in metadata.dtype.names}
diff --git a/pyannote/audio/tasks/segmentation/voice_activity_detection.py b/pyannote/audio/tasks/segmentation/voice_activity_detection.py
index 6d0bdb98d..e52613aeb 100644
--- a/pyannote/audio/tasks/segmentation/voice_activity_detection.py
+++ b/pyannote/audio/tasks/segmentation/voice_activity_detection.py
@@ -164,18 +164,25 @@ def prepare_chunk(self, file_id: int, start_time: float, duration: float):
]
# discretize chunk annotations at model output resolution
- start = np.maximum(chunk_annotations["start"], chunk.start) - chunk.start
- start_idx = np.floor(start / self.model.example_output.frames.step).astype(int)
- end = np.minimum(chunk_annotations["end"], chunk.end) - chunk.start
- end_idx = np.ceil(end / self.model.example_output.frames.step).astype(int)
+ step = self.model.receptive_field.step
+ half = 0.5 * self.model.receptive_field.duration
+
+ start = np.maximum(chunk_annotations["start"], chunk.start) - chunk.start - half
+ start_idx = np.maximum(0, np.round(start / step)).astype(int)
+
+ end = np.minimum(chunk_annotations["end"], chunk.end) - chunk.start - half
+ end_idx = np.round(end / step).astype(int)
# frame-level targets
- y = np.zeros((self.model.example_output.num_frames, 1), dtype=np.uint8)
+ num_frames = self.model.num_frames(
+ round(duration * self.model.hparams.sample_rate)
+ )
+ y = np.zeros((num_frames, 1), dtype=np.uint8)
for start, end in zip(start_idx, end_idx):
- y[start:end, 0] = 1
+ y[start : end + 1, 0] = 1
sample["y"] = SlidingWindowFeature(
- y, self.model.example_output.frames, labels=["speech"]
+ y, self.model.receptive_field, labels=["speech"]
)
metadata = self.prepared_data["audio-metadata"][file_id]
diff --git a/pyannote/audio/utils/frame.py b/pyannote/audio/utils/frame.py
index e3987873d..5ff0189be 100644
--- a/pyannote/audio/utils/frame.py
+++ b/pyannote/audio/utils/frame.py
@@ -24,7 +24,9 @@
import math
-def conv1d_num_frames(num_samples, kernel_size=5, stride=1, padding=0, dilation=1):
+def conv1d_num_frames(
+ num_samples, kernel_size=5, stride=1, padding=0, dilation=1
+) -> int:
"""Compute expected number of frames after 1D convolution
Parameters
From e43138278a6990bd8ccc673d8a4dcd5824849d7f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Herv=C3=A9=20BREDIN?=
Date: Thu, 25 Jan 2024 10:42:32 +0100
Subject: [PATCH 44/57] feat: add pyannote.audio.sample.SAMPLE_FILE (#1629)
---
CHANGELOG.md | 1 +
MANIFEST.in | 2 ++
pyannote/audio/sample/__init__.py | 56 ++++++++++++++++++++++++++++++
pyannote/audio/sample/sample.rttm | 10 ++++++
pyannote/audio/sample/sample.wav | Bin 0 -> 960104 bytes
tests/test_sample.py | 28 +++++++++++++++
6 files changed, 97 insertions(+)
create mode 100644 pyannote/audio/sample/__init__.py
create mode 100644 pyannote/audio/sample/sample.rttm
create mode 100644 pyannote/audio/sample/sample.wav
create mode 100644 tests/test_sample.py
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 29e5dc311..15b5b6aee 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,7 @@
- feat(task): add option to cache task training metadata to speed up training
- feat(pipeline): add `Waveform` and `SampleRate` preprocessors
- feat(model): add `num_frames`, `receptive_field`, and `dimension` properties to segmentation models
+- feat(sample): add sample file at `pyannote.audio.sample.SAMPLE_FILE`
### Fixes
diff --git a/MANIFEST.in b/MANIFEST.in
index 16909925f..45ad7d6af 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,6 @@
recursive-include pyannote *.py
recursive-include pyannote *.yaml
+recursive-include pyannote *.wav
+recursive-include pyannote *.rttm
global-exclude *.pyc
global-exclude __pycache__
diff --git a/pyannote/audio/sample/__init__.py b/pyannote/audio/sample/__init__.py
new file mode 100644
index 000000000..85399af66
--- /dev/null
+++ b/pyannote/audio/sample/__init__.py
@@ -0,0 +1,56 @@
+# MIT License
+#
+# Copyright (c) 2024- CNRS
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+
+from pathlib import Path
+
+from pyannote.core import Annotation, Segment, Timeline
+from pyannote.database.util import load_rttm
+
+from pyannote.audio.core.io import Audio, AudioFile
+
+
+def _sample() -> AudioFile:
+ sample_wav = Path(__file__).parent / "sample.wav"
+ uri = "sample"
+
+ audio = Audio()
+ waveform, sample_rate = audio(sample_wav)
+
+ sample_rttm = Path(__file__).parent / "sample.rttm"
+
+ annotation: Annotation = load_rttm(sample_rttm)[uri]
+ duration = audio.get_duration(sample_wav)
+
+ annotated: Timeline = Timeline([Segment(0.0, duration)], uri=uri)
+
+ return {
+ "audio": sample_wav,
+ "uri": "sample",
+ "waveform": waveform,
+ "sample_rate": sample_rate,
+ "annotation": annotation,
+ "annotated": annotated,
+ }
+
+
+SAMPLE_FILE = _sample()
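`SAMPLE_FILE` gives tests and tutorials a self-contained `AudioFile` mapping. A usage sketch (keys as defined in `_sample()` above):

    from pyannote.audio.sample import SAMPLE_FILE

    SAMPLE_FILE["uri"]          # "sample"
    SAMPLE_FILE["waveform"]     # preloaded (channel, time) torch.Tensor
    SAMPLE_FILE["sample_rate"]  # int
    SAMPLE_FILE["annotation"]   # reference diarization loaded from sample.rttm
    SAMPLE_FILE["annotated"]    # Timeline covering the whole file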
diff --git a/pyannote/audio/sample/sample.rttm b/pyannote/audio/sample/sample.rttm
new file mode 100644
index 000000000..7c6b378fe
--- /dev/null
+++ b/pyannote/audio/sample/sample.rttm
@@ -0,0 +1,10 @@
+SPEAKER sample 1 6.690 0.430 <NA> <NA> speaker90 <NA> <NA>
+SPEAKER sample 1 7.550 0.800 <NA> <NA> speaker91 <NA> <NA>
+SPEAKER sample 1 8.320 1.700 <NA> <NA> speaker90 <NA> <NA>
+SPEAKER sample 1 9.920 1.110 <NA> <NA> speaker91 <NA> <NA>
+SPEAKER sample 1 10.570 4.130 <NA> <NA> speaker90 <NA> <NA>
+SPEAKER sample 1 14.490 3.430 <NA> <NA> speaker91 <NA> <NA>
+SPEAKER sample 1 18.050 3.440 <NA> <NA> speaker90 <NA> <NA>
+SPEAKER sample 1 18.150 0.440 <NA> <NA> speaker91 <NA> <NA>
+SPEAKER sample 1 21.780 6.720 <NA> <NA> speaker91 <NA> <NA>
+SPEAKER sample 1 27.850 2.150 <NA> <NA> speaker90 <NA> <NA>
diff --git a/pyannote/audio/sample/sample.wav b/pyannote/audio/sample/sample.wav
new file mode 100644
index 0000000000000000000000000000000000000000..150d49a69dc2da2310fbb526759ff26638595bdc
GIT binary patch
literal 960104
(base85-encoded binary payload omitted)