diff --git a/.github/workflows/flake8-pytest.yml b/.github/workflows/flake8-pytest.yml index 36734e8..0dd7bb0 100644 --- a/.github/workflows/flake8-pytest.yml +++ b/.github/workflows/flake8-pytest.yml @@ -30,6 +30,7 @@ jobs: python -m pip install flake8 pytest poetry # poetry install --all-extras pip install -e .[all] + pip install git+https://github.com/m-bain/whisperx.git - name: Lint with flake8 run: | # stop the build if there are Python syntax errors or undefined names diff --git a/README.md b/README.md index 525e3f1..fbeaa46 100644 --- a/README.md +++ b/README.md @@ -42,24 +42,23 @@ For the default installation, which does **not** include the dependencies for th pip install -U ferret-xai ``` -Our main dependencies are 🤗 `tranformers` and `datasets`. +**Troubleshoothing** -If the speech XAI functionalities are needed, then - -``` -pip install -U ferret-xai[speech] -``` - -At the moment, the speech XAI-related dependencies are the only extra ones, so installing with `ferret-xai[speech]` or `ferret-xai[all]` is equivalent. - -**Important** Some of our dependencies might use the package name for `scikit-learn` and that breaks ferret installation. \ +Some of our dependencies might use the package name for `scikit-learn` and that breaks ferret installation. \ If your pip install command fails, try: ```bash SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL=True pip install -U ferret-xai ``` -This is hopefully a temporary situation! +### (Optional) Install XAI Speech functionalities + +If the speech XAI functionalities are needed, then follow these steps: + +1. install the library with: `pip install -U ferret-xai[speech]` or `pip install -U ferret-xai[all]` +2. install whisperX with `pip install git+https://github.com/m-bain/whisperx.git` +3. install system-wide [ffmpeg](https://ffmpeg.org/download.html). 
If you have no sudo rights, you can try with `conda install conda-forge::ffmpeg` + ### Explain & Benchmark diff --git a/examples/speech/audio_from_hf.ipynb b/examples/speech/audio_from_hf.ipynb new file mode 100644 index 0000000..67da144 --- /dev/null +++ b/examples/speech/audio_from_hf.ipynb @@ -0,0 +1,632 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Speech XAI" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/moscato/miniconda3/envs/ferret-testing-2/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n", + "/home/moscato/miniconda3/envs/ferret-testing-2/lib/python3.10/site-packages/pyannote/audio/core/io.py:43: UserWarning: torchaudio._backend.set_audio_backend has been deprecated. With dispatcher enabled, this function is no-op. 
You can remove the function call.\n", + " torchaudio.set_audio_backend(\"soundfile\")\n", + "torchvision is not available - cannot save figures\n" + ] + } + ], + "source": [ + "from datasets import Dataset, load_dataset\n", + "from IPython.display import display\n", + "import numpy as np \n", + "import os\n", + "import pandas as pd\n", + "from pathlib import Path\n", + "from pydub import AudioSegment\n", + "import torch\n", + "from transformers import Wav2Vec2ForSequenceClassification, Wav2Vec2FeatureExtractor\n", + "\n", + "from ferret import SpeechBenchmark, AOPC_Comprehensiveness_Evaluation_Speech, AOPC_Sufficiency_Evaluation_Speech" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "DATASET_ID = \"DynamicSuperb/IntentClassification_FluentSpeechCommands-Action\"" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "cuda:0\n" + ] + } + ], + "source": [ + "# Note: set the ordinal of the device according to your system.\n", + "device = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n", + "print(device)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Data" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Dataset({\n", + " features: ['file', 'speakerId', 'transcription', 'audio', 'label', 'instruction'],\n", + " num_rows: 200\n", + "})" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "data = load_dataset(DATASET_ID, split=\"test\")\n", + "data" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'file': 'wavs/speakers/Xygv5loxdZtrywr9/77506ae0-452b-11e9-a843-8db76f4b5e29.wav',\n", + " 'speakerId': 'Xygv5loxdZtrywr9',\n", + " 'transcription': 'Increase 
the temperature in the washroom',\n", + " 'audio': {'path': '77506ae0-452b-11e9-a843-8db76f4b5e29.wav',\n", + " 'array': array([0. , 0. , 0. , ..., 0.02133179, 0.01977539,\n", + " 0.01849365]),\n", + " 'sampling_rate': 16000},\n", + " 'label': 'increase',\n", + " 'instruction': 'Recognize the action behind the verbal expression. The answer could be activate, bring, change language, deactivate, decrease, or increase.'}" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "sample = data[0]\n", + "sample" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this notebook we are using Wav2Vec2 which expects audio arrays to be in 16kHz. Luckly, this is the native sampling rate of our data. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Models" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at superb/wav2vec2-base-superb-ic were not used when initializing Wav2Vec2ForSequenceClassification: ['wav2vec2.encoder.pos_conv_embed.conv.weight_g', 'wav2vec2.encoder.pos_conv_embed.conv.weight_v']\n", + "- This IS expected if you are initializing Wav2Vec2ForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing Wav2Vec2ForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of Wav2Vec2ForSequenceClassification were not initialized from the model checkpoint at superb/wav2vec2-base-superb-ic and are newly initialized: ['wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original0', 'wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original1']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" + ] + } + ], + "source": [ + "## Load model\n", + "model = Wav2Vec2ForSequenceClassification.from_pretrained(\n", + " \"superb/wav2vec2-base-superb-ic\"\n", + ")\n", + "feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(\n", + " \"superb/wav2vec2-base-superb-ic\"\n", + ")\n", + "\n", + "if torch.cuda.is_available():\n", + " model = model.to(device)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Speech-XAI: the `SpeechBenchmark` class" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note: if not specified otherwise, `SpeechBenchmark` assumes English as the source language." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "## Instantiate benchmark class\n", + "benchmark = SpeechBenchmark(model, feature_extractor, device=device)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's start from transcribing the example above using WhisperX." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.1. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../../.cache/torch/whisperx-vad-segmentation.bin`\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", + "Model was trained with torch 1.10.0+cu102, yours is 2.2.1+cu121. Bad things might happen unless you revert torch to 1.x.\n" + ] + }, + { + "data": { + "text/plain": [ + "(' Increase the temperature in the washroom.',\n", + " [{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438},\n", + " {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141},\n", + " {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444},\n", + " {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848},\n", + " {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953},\n", + " {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "text, word_timestamps = benchmark.transcribe(\n", + " sample[\"audio\"][\"array\"],\n", + " current_sr=sample[\"audio\"][\"sampling_rate\"],\n", + ")\n", + "text, word_timestamps" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Explain word importance" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Word importance" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ExplanationSpeech(features=['Increase', 'the', 'temperature', 'in', 
'the', 'washroom.'], scores=array([[ 0.47325889, -0.45515126, -0.10200202, -0.15734476, -0.1214807 ,\n", + " 0.0109534 ],\n", + " [ 0.07733703, -0.02064097, 0.34651214, -0.01588559, -0.01463729,\n", + " -0.02365428],\n", + " [-0.01432282, -0.01848161, -0.00988954, -0.00070852, -0.01123005,\n", + " 0.32860351]]), explainer='loo_speech+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" + ] + } + ], + "source": [ + "explanation = benchmark.explain(\n", + " audio_path_or_array=sample[\"audio\"][\"array\"],\n", + " current_sr=sample[\"audio\"][\"sampling_rate\"],\n", + " methodology='LOO',\n", + " word_timestamps=word_timestamps\n", + ")\n", + "# display(benchmark.show_table(explanation, decimals=3))\n", + "print(explanation)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ExplanationSpeech(features=['Increase', 'the', 'temperature', 'in', 'the', 'washroom.'], scores=array([[ 2.73476344e-01, -2.75996750e-02, 2.68968727e-02,\n", + " 4.38229965e-02, -9.83699882e-03, 3.43606337e-02],\n", + " [-4.55664854e-02, 2.00781865e-04, 3.07805077e-01,\n", + " -7.30899444e-03, 8.18159192e-03, 1.45066601e-01],\n", + " [ 7.67945654e-02, -1.63121489e-02, 1.69544356e-01,\n", + " 1.03233346e-02, 6.95427875e-02, 4.02942513e-01]]), explainer='LIME+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 
0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" + ] + } + ], + "source": [ + "explanation = benchmark.explain(\n", + " audio_path_or_array=sample[\"audio\"][\"array\"],\n", + " current_sr=sample[\"audio\"][\"sampling_rate\"], \n", + " methodology='LIME',\n", + " word_timestamps=word_timestamps\n", + ")\n", + "print(explanation)\n", + "#display(benchmark.show_table(explanation, decimals=3))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can run the same function but with no word timestamps. The class will generate them automatically." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Transcribing audio to get word level timestamps...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.1. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../../.cache/torch/whisperx-vad-segmentation.bin`\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", + "Model was trained with torch 1.10.0+cu102, yours is 2.2.1+cu121. 
Bad things might happen unless you revert torch to 1.x.\n", + "Transcribed audio with whisperX into: Increase the temperature in the washroom.\n", + "ExplanationSpeech(features=['Increase', 'the', 'temperature', 'in', 'the', 'washroom.'], scores=array([[ 0.30518981, -0.05905296, 0.02406044, 0.06312683, -0.01027067,\n", + " 0.00634836],\n", + " [-0.00192932, 0.04791306, 0.30365684, 0.01351914, -0.02577573,\n", + " 0.13388129],\n", + " [ 0.0786875 , -0.029679 , 0.21510288, 0.02970933, 0.03952171,\n", + " 0.44306297]]), explainer='LIME+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" + ] + } + ], + "source": [ + "explanation = benchmark.explain(\n", + " audio_path_or_array=sample[\"audio\"][\"array\"],\n", + " current_sr=sample[\"audio\"][\"sampling_rate\"], \n", + " methodology='LIME',\n", + ")\n", + "print(explanation)\n", + "#display(benchmark.show_table(explanation, decimals=3))" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(EvaluationSpeech(name='aopc_compr_speech', score=[0.3684087162837386, 0.24896559864282608, 0.5148161184042692], target=[3, 4, 3]),\n", + " EvaluationSpeech(name='aopc_suff', score=[0.014175561256706715, -0.004319146275520325, -0.01769007444381714], target=[3, 4, 3]))" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aopc_compr = AOPC_Comprehensiveness_Evaluation_Speech(benchmark.model_helper)\n", + "evaluation_output_c = aopc_compr.compute_evaluation(explanation)\n", + "\n", + "aopc_suff = 
AOPC_Sufficiency_Evaluation_Speech(benchmark.model_helper)\n", + "evaluation_output_s = aopc_suff.compute_evaluation(explanation)\n", + "\n", + "evaluation_output_c, evaluation_output_s" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Explain paralinguistic impact" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Perturbation type: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 8/8 [01:15<00:00, 9.44s/it]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
 pitch shiftingpitch shifting downpitch shifting uptime stretchingtime stretching downtime stretching upreverberationnoise
action=increase0.310.240.410.330.330.330.240.27
object=heat0.250.190.33-0.02-0.02-0.02-0.000.23
location=washroom0.020.020.02-0.02-0.02-0.02-0.010.70
\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "explain_table = benchmark.explain(\n", + " audio_path_or_array=sample[\"audio\"][\"array\"],\n", + " current_sr=sample[\"audio\"][\"sampling_rate\"],\n", + " methodology='perturb_paraling',\n", + ")\n", + "display(benchmark.show_table(explain_table, decimals=2))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Show variation" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "perturbation_types = ['time stretching', 'pitch shifting', 'reverberation', 'noise']\n", + "variations_table = benchmark.explain_variations(\n", + " audio_path_or_array=sample[\"audio\"][\"array\"],\n", + " current_sr=sample[\"audio\"][\"sampling_rate\"],\n", + " perturbation_types=perturbation_types\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAcYAAAGZCAYAAAATupELAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/H5lhTAAAACXBIWXMAAA9hAAAPYQGoP6dpAACdEklEQVR4nOzdd1xV9f/A8dcF4QKXLYqguAUXpuYegCIouFJzm6s0R2auFEdqOVPTsvxm3wwzR4ozV5IJoomrr5aZZq4C92A4AAXO7w9+nLiCcC+xfT99nMfDc/h8PudzzgXefM75DI2iKApCCCGEAMCksCsghBBCFCUSGIUQQogMJDAKIYQQGUhgFEIIITKQwCiEEEJkIIFRCCGEyEACoxBCCJGBBEYhhBAiAwmMQgghRAYSGEWxsn37dlasWJHp+ODBg6lbt24h1MhwsbGxzJo1i99//z1Py33ePcnJ1atX0Wg0bN68Odt0s2bNwtraOrfVE6LYkcAoipXnBYEZM2awfv36QqiR4WJjY5k9e3aRCYyGeuONNwgLC8u38oUoakoVdgWEyAvVqlUr7CrkqYSEBCwtLQu7GgBUqFCBChUqFHY1hCgw0mIUxcbgwYP5+uuvOXv2LBqNBo1Gw+DBg9WvZXyUunr1ajQaDSdPnsTf3x8rKys8PDzYv38/qampTJ8+HWdnZ5ydnQkKCiI1NVXvXOfOnaNr167Y2dmh0+no2LEjly5dyrGOCxYsoHr16lhYWFCmTBnatWvHlStXuHr1KlWqVAGgZ8+eav2vXr2qPtJcvXo1w4YNo3Tp0jRp0gSApKQkpk6dSqVKldBqtdSqVUuvZZzdPQGIjIzE398fW1tbbGxsaNq0KT/88INenRMTE3nrrbdwcHDAxcWFiRMnkpycrH792Uep4eHhaDQafvjhB/r164eNjQ2VKlXiww8/zHQ/Vq5cSaVKlbCyssLPz49Tp06p1ypEUSUtRlFszJgxgzt37nD+/HnWrVsHQJkyZbLNM3DgQEaMGMGECRNYsGAB3bt3Z9CgQcTHx7NmzRqOHTvGzJkz8fT0pF+/fgBcvnyZFi1aULduXVavXo2JiQlz587F19eXP/74A61Wm+W51qxZw4wZM3j//fdp3rw5cXFxHDp0iPj4eGrWrMnWrVvp3r078+bNo02bNgC4uLhw48YNAIKCgujYsSMbNmxQA3WvXr04fPgwM2fOpFatWuzZs4cBAwbg4OBAQEBAtvfkp59+om3btjRr1owvv/wSe3t7Tp48yd9//61X72nTptG1a1c2bdrEkSNHmDVrFtWrV2fEiBHZ3tsRI0bw2muvsW3bNrZv387kyZOpV68eHTp0AOC7775jxIgRvPHGG7z66qucPn2aXr16ZVumEEWCIkQxMmjQIKVOnTo5Hg8ODlYAZcWKFeqxM2fOKIDSrFkzvbwvv/yy8sorr6j7AwcOVKpWraokJCSox27fvq1YW1srn3322XPrNnr0aKVhw4bP/fqVK1cUQAkJCcnyeIcOHfSOHzhwQAGUffv26R3v3bu30rhx4+dee7oWLVootWvXVpKTk7OtT8+ePfWOe3t7K76+vur+zJkzFZ1Op+6HhYUpgDJp0iT1WGpqqlK5cmXl9ddfV481btxYadu2rV7ZH3zwgQIowcHBWdZJiKJAHqWKEs3Pz0/9v7u7OwC+vr56adzd3YmKilL3Q0ND6dKlC6VKlSI5OZnk5GQcHBxo0KABJ06ceO65GjZsyKlTpxg/fjyHDx/m6dOnRtW1Y8eOevuhoaE4OjrStm1btR7JycnqI8mUlJTnlvX48WOOHj3KoEGDMDU1zfa8/v7+evu1a9cmOjo6x/pmzKfRaKhVq5aaLyUlhVOnTtGlSxe9PF27ds2xXCEKmwRGUaLZ29ur/zc3N890LP14YmKiun/37l2WLVuGmZmZ3nbo0CG9APqswYMHs3TpUvbt20fr1q0pU6YMY8eOJSEhwaC6Ojs76+3fvXuX+/fvZ6r
HG2+8QXJysvoINisxMTGkpqbi6uqa43lzuh+5yXfnzh2Sk5MzPeouW7ZsjuUKUdjkHaMQz3B0dKRjx46MGjUq09dsbGyem8/ExISxY8cyduxYrl27xrfffsuUKVNwcnJixowZOZ5Xo9FkqkeZMmXYs2dPlumzCzL29vaYmJhw/fr1HM+bH8qUKUOpUqW4c+eO3vHbt28XSn2EMIYERlGsGNqa+TfatWvHb7/9RoMGDXJ8DPk85cuXZ8KECaxfv55z584B/7RYDa1/u3bt+PDDDzE3N6devXrPTZfVPdHpdDRv3pw1a9YwYcKEXF9HbpmamtKgQQN27NjB2LFj1ePbt28v0HoIkRsSGEWxUqtWLb766is2bNhAjRo1cHJyonLlynl6jtmzZ9O4cWPat2/P8OHDcXZ25ubNmxw8eJDWrVvTt2/fLPO9+eabODg40KxZMxwcHPjpp5/45Zdf1JZnuXLlsLe3Z8OGDVSpUgWtVpttwPPz86Nz58506NCBd999l3r16vHo0SPOnj3LxYsX+fLLL7O9JwsWLKBt27a0a9eOUaNG4eDgwP/+9z+cnJwYOnRont6zrEyfPp2uXbsybNgwevbsyalTp/j666+BtNa1EEWVfHeKYuX111+nZ8+ejBkzhsaNGzNr1qw8P0f16tU5fvw4pUuXZtSoUbRv354pU6bw6NGjbANZixYtOHz4MK+//jodOnRg3bp1LF26lNdffx1ICwbBwcFcuXIFX19fGjdunOOjzs2bNzNixAhWrFhBQEAAr7/+OqGhoXh7e6tpnndPWrVqpY45HDx4MN27d2fbtm1UqlTp398kA3Tp0oX//Oc/7Nu3j65du7J3717+85//AGBnZ1cgdRAiNzSKoiiFXQkhxIth1apVvPHGG1y5ciXPW/pC5BV5lCqEyBf3799n9uzZtG3bFhsbG06cOMHcuXPp2rWrBEVRpElgFELkCzMzMy5dusT69euJjY2lTJkyvPbaayxcuLCwqyZEtuRRqhBCCJGBdL4RQgghMpDAKIQQQmQg7xiLiNTUVK5fv46NjU2mGVCEEC8WRVF48OABrq6uMuazEEhgLCKuX7+Om5tbYVdDCFGEREVFySLRhUACYxGRPgfnxT0nsdFZ55BaX+KNB0afT1fV0eg8AA9+e/7E1dmxqGCfq3ymlmZG50m88TB357LK3Y+Dtqxxn1e6lIo6o/M8+Sl3c58m38/dPTGxynrtyZxY+Bo/iUBury010bhVTNKZWBj/vQVg5Zn9GqBZSfozxqj0Dx4/xKNni2zn5hX5p8AC4+DBgzl58iS//fZbQZ2yWEl/fGqjs8bW2rgfBnMr4zsW64w8h8rK+CAMYKnL3flMrYz/5WVulatTYarL3S9KrXUuA6Ot8fme6HJ3/5/mcnpZ09wGRltbo/Pk9tpSTZ7kKp+JpXmu8lnZGH9tSbrkXJ1LXqsUjgILjDNmzODRo0cFdTohhBAiVwosMFarVq1AzpOQkIClpWWBnEsIIUTJU2DdnQYPHkzdunUBWL16NRqNhlOnThEQEIBOp6NGjRqsWbMmU77du3fTsmVLrKyscHBwwMfHh1OnTgGoEyTv3r2bV199FVtbW3r27AlAbGwso0aNwsXFBa1Wy8svv0xoaGimsv38/Chbtiy2trY0bdqU77//Xi9NbGwsw4YNo3z58lhYWODm5kafPn300kRHRzNgwACcnJywtLTEy8uLn3/+Oc/unRBCiIJTqP2A+/fvj7+/P9u3b6dBgwYMHjxYXbsOYOPGjXTu3JmyZcuyfv161q1bR8uWLbl27ZpeOcOHD6datWps27aNiRMn8uTJE/z8/Ni1axdz587lu+++o3bt2nTs2JEzZ86o+a5cuULnzp355ptv2LJlCy1btiQwMJDw8HA1zfjx49m1axfz5s1j3759LFq0CK32n/cuMTExtGrVitOnT7N8+XK2bNmCTqejbdu22S7KmpSURHx8vN4mhBCi8BVqr9S33npLXau
uRYsW7N69my1btjB9+nQURWHixIn4+/uzbds2NU9gYGCmcrp06aI3/2JwcDCnT5/ml19+oXbt2gC0b9+eP//8kw8++IBNmzap50+XmppKmzZtOHv2LF988QU+Pj4AHD9+nH79+jFo0CA1bcYW47Jly4iNjeX48ePqiuq+vr64u7uzePFiPvzwwyyvff78+cyePduo+yWEECL/FWqL0d/fX/2/TqejUqVKREdHA/DHH38QHR1t0IKqHTt21NsPDQ3F09MTd3d3kpOT1c3Pz48TJ06o6aKjoxk0aBDly5enVKlSmJmZERoayoULF9Q0DRs2ZPXq1SxevDjLHrWhoaG0adMGR0dH9TympqZ4e3vrnetZQUFBxMXFqVtUVFSO1ymEECL/FWqL0d7eXm/f3NycxMS0fuX37t0DwNXVNcdynJ2d9fbv3r3LqVOnMDPL3P3e1NQUSGshdunShbi4ON5//32qV6+OTqfjvffe4++//1bTL1++HEdHR5YsWcKkSZNwc3MjKCiIkSNHquc6evRolufKrsORVqvVeyQrhBCiaCiyA/xLly4NkOMK55B5rI+joyP16tVj1apVz81z8eJFTp06xfbt2+natat6PCEhQS+dnZ0dy5YtY9myZZw5c4aPP/6YUaNGUbduXVq3bo2joyMdOnTggw8+yHQOCXxCCFH8FNnA6OHhQYUKFQgODqZXr15G5W3Xrh179uzB1dX1uS3O9ABobv7PIN+//vqLn376CXd39yzzeHp6snTpUlatWsW5c+do3bo17dq1Y+3atdSqVQudzviZTIQQQhQtRTYwajQaFi9eTN++fenRowcDBw5Eq9USGRlJ48aN6dSp03PzDhw4kJUrV+Lj48PEiRNxd3cnNjaWU6dO8eTJE+bPn0/NmjWpUKECU6ZMISUlhYcPHzJz5kzKly+vV1bLli3p1q0bdevWxdTUlDVr1mBubk7r1q2BtF6r69atw9vbm7Fjx1KxYkXu3LnDsWPHcHV1Zdy4cfl6n4QQQuStIhsYAXr37o2VlRVz586lT58+WFhY0LBhQ7p165ZtPq1Wy4EDB5g1axZz587lxo0bODk50aBBA7UXrFarZevWrYwePZqePXvi5ubG9OnTOXDgACdPnlTLatmyJWvWrOHKlSuYmJjg6enJzp07qVWrFpD2yPfo0aNMnz6dyZMnc+/ePcqWLUuzZs1yrKcQQoiiR6MoivETbYo8Fx8fj52dHddvRmFr5DyTmlx0LlZINTpPcWGCaa7ypZKSxzXJninGz82aQu4mzM7N9wiU7O+TgmTs/Y+Pj8elXHni4uKM/n0g/j1Z6EsIIYTIoMQHxoxT0eW32NhYZs2axe+//14g5xNCCJH3SnxgLEixsbHMnj1bAqMQQhRjEhiFEEKIDF6YwBgeHk6DBg3Q6XQ0adJEb/ULRVFYvHgx7u7uaLVaqlatytKlS/Xynz9/nj59+uDm5oaVlRW1a9dmyZIlpKamdU64evUqVapUAaBnz55oNBo0Gg1Xr14tsGsUQgjx7xXp4Rp55ebNm7z99ttMmTIFOzs7goKC6NatG5cuXcLMzIyxY8fy5ZdfMm3aNJo2bcqRI0eYPHkylpaWjBgxAoBr167h4eFB//79sbGx4fTp08ycOVMd/+ji4sLWrVvp3r078+bNo02bNgC4uLhkWaekpCSSkpLUfVldQwghioYXIjDev3+fgwcPUqdOHSBtwvI2bdpw7NgxXFxc+PTTT/n8888ZPnw4kDZzzuPHj5k9ezbDhw/HxMQEX19ffH19gbQWZqtWrXj8+DGffvopM2fORKvV0qBBAwBq1KhBs2bNsq2TrK4hhBBF0wvxKNXV1VUNioC6FFV0dDT79+8HoEePHnorcbRr146bN2+qq14kJiYyc+ZMqlevjlarxczMjGnTpnHjxg0ePnxodJ1kdQ0hhCiaXogWY1areEBasLt79y6KouDk5JRl3qioKCpVqsTkyZP573//y8yZM3n55Ze
xt7dnx44dzJkzh8TERKytrY2qk6yuIYQQRdMLERiz4+joiEaj4fDhw3oTiqfz8PAAICQkhDfffJPJkyerX9u9e3eB1VMIIUTBeOEDY/p7w3v37tG5c+fnpktISNALnCkpKXz77bd6aTK2RIUQQhRPL3xgdHd3Z/To0bz22mtMmjSJpk2b8vTpUy5cuEBYWBjbt28HwM/Pj//+97/Url0bJycnVqxYoderFKBcuXLY29uzYcMGqlSpglarpV69elm2RIUQQhRNL0Tnm5x88sknzJkzh2+//ZaOHTsyYMAANm7ciLe3t5pm+fLleHt7M2bMGF5//XU8PT2ZOnWqXjkmJiYEBwdz5coVfH19ady4sUELLQshhCg6ZHWNIiJ9dY0bN6/lYnUNjdHnU5CPvbDlZhWQgl4BRBQOWV2jcEmLUQghhMigWARGHx8fOnXqVNjVEEII8QIoFoFRCCGEKCglOjAmJCQUdhWEEEIUM0YFxkOHDqHRaLh06ZJ6rHPnzmg0Gs6ePase69u3Lx07dgRgypQpeHp6Ym1tTfny5enbty83btzQK/enn37Cy8sLOzs7bGxs8PT05Ouvv850/s2bN+Ph4YG1tTVt27bVq8fVq1fRaDSsXr2aYcOGUbp0aZo0aQKkzZU6dOhQnJycsLS0pEWLFkRERGQqf+XKlXh4eKDVaqlcuTJz5sxRV88AWL16NRqNhpMnT+Lv74+VlRUeHh7s37+f1NRUpk+fjrOzM87OzgQFBenlFUIIUTwYFRibNGmChYWFGlRSU1M5fPiw3jGAiIgIvLy8ALh9+zZTp05l9+7dfPzxx1y9ehVvb2+Sk5OBtN5XHTt2xNbWlg0bNrB9+3aGDx9ObGys3rlPnz7NokWLWLBgAatXr+bixYsMGDAgUx2DgoJQFIUNGzawaNEiUlJSCAgIYOfOnSxcuJCQkBCsra3x8/PTW3pq+fLljBgxgvbt27Nz504GDx7MrFmzePfddzOdY+DAgXTq1Ilt27bh6upK9+7dGTt2LFFRUaxZs4bRo0ezYMGCTBMAZJSUlER8fLzeJoQQovAZPVzD29ubqlWrEhwczOnTp2nSpAlDhw4lNjaWb7/9losXL1KjRg2OHDlC8+bN9fKmpKRw8+ZNKlSowL59+/D39+fkyZM0btyYX3/9FU9PzyzP6ePjw8mTJ7ly5QplypQB0lpvQ4YMISoqigoVKqjrIXbo0IG9e/eqeb/77ju6du3K999/T/v27QF4+vQp1atXp1GjRmzZsoWUlBTKlStHu3bt2LBhg5p36tSpLFmyhOvXr1O6dGn1nCtWrGDkyJEA/Pbbb3h6etKsWTMiIyPVvI0aNcLNzY1t27ZleU2zZs3KcnUNGa7x4pDhGuJ5ZLhG4TL6HaOXl5faOoyIiKBRo0YEBAToHbOysqJRo0YA7N27lxYtWmBnZ0epUqWoUKECABcuXACgWrVq2NraMnLkSDZt2sSdO3eyPG/9+vXVoAj6K2RklP4IN92hQ4ewtbVVgyKAmZkZ3bt35/Dhw0DaIsR3796lZ8+eenl79+7NkydPOH78uN5xPz8/9f/u7u7AP1PLZTye3YoZsrqGEEIUTUYHRm9vby5fvsy1a9fUR6atW7fm5s2b/Pnnn0RERNCsWTPMzMw4ceIEXbp0wdXVlW+++YbIyEiOHj0K/DOfqIODAz/88AM2Nja89tprlCtXDh8fH86cOaN33uxWyMjI2dlZbz8mJoayZctmug5nZ2fu37+vpskqb/p+erqs6pJej6zql92cqVqtFltbW71NCCFE4TM6MDZv3hwzMzMiIiI4dOgQXl5eODo6UqdOHQ4ePEhERAStW7cGYNu2bdjZ2bFp0ya6dOlCs2bNKFeuXKYymzRpwt69e4mNjWXnzp3cvn2bV155JVcXpNHoP1Z0dHTk9u3bmdLdunULR0dHNQ2QKd2tW7f0vi6EEKLkMzow6nQ6GjZsyMqVK7l37x6tWrUC0lqS69at48qVK2r
Hm4SEBMzMzPSC1bp1655btqWlJYGBgYwcOZIrV67kySoVrVq1Ij4+ntDQUPVYcnIy27ZtU+vu4eFBmTJlCAkJ0cu7adMmzM3N1d6tQgghSr5cra7h5eXFokWLaNiwofoI0MvLi88++wwzMzO1042fnx/Lli1jzJgxdOvWjcjISL755hu9snbv3s2qVavo1q0bFStW5ObNmyxfvpyWLVtiYWHxLy8v7Z1jkyZNGDBgAAsWLMDZ2Znly5dz48YNdRJwU1NTZsyYwdtvv03ZsmUJDAzk6NGjLFy4kHfeeYfSpUv/63oIIYQoHnIVGL29vVm0aJHaMgTU/zdq1AhLS0sAAgMDWbhwIcuXLyc4OJiWLVuya9cutcMKQPXq1TExMWHatGncvn2b0qVL4+/vz/z58//NdalMTU3Zs2cPEydOZNKkSTx69IiGDRsSGhrKyy+/rKYbM2YMZmZmfPTRR6xYsQIXFxdmzZqVaQWN/JLeOfjBgwdG55VeqcWT9EoVz5P+e0DWeCgcsrpGEREdHY2bm1thV0MIUYSkD0cTBUsCYxGRmprK9evXsbGxydSBKD4+Hjc3N6KiogzuvZqbPAWdrzjUMbf5pI5Sx3+TT1EUHjx4gKurKyYmJXrmziIpV49SRd4zMTHJ8S/D3AzryO1QkILMVxzqmNt8Use8yfci1tHOzs7oskTekD9FhBBCiAwkMAohhBAZSGAsBrRaLTNnzkSr1eZrnoLOVxzqmNt8UkepY37lE/lPOt8IIYQQGUiLUQghhMhAAqMQQgiRgQRGIYQQIgMJjEIIIUQGEhgL0Pnz5/Hz80On01GuXDneffddnjx5kmO+ypUro9FoMm0ZVx8JDw/PMk2fPn0Kpd7Pq49Go6FmzZoFXh+Ae/fuMWLECCpWrIhOp6Nu3bp8/vnnBtU7L+5jUXPx4kVGjBhB/fr1KVWqFHXr1jUo34oVK+jUqRNlypRBo9GwefPmTGny8z7mtt4DBgygRo0a6HQ6HBwc8PLy0lt1p6Drk9GyZcvQaDR06tRJ7/iL9P1YlMjMNwUkJiaGtm3bUqNGDbZu3cq1a9cYP348jx8/5tNPP80x/6uvvsqECRP0jmXVzTs4OFgv8Dg5ORVKvRs2bEhkZKTesfj4eAICAggICCjw+gD07NmT8+fPM2/ePCpWrMiePXsYOXIkpqamDBs2TC9tXt/Houjs2bPs3r2bpk2bkpqaSmpqqkH51qxZA6QtEpD+/+fJj/uY23o/efKE8ePHU6NGDRITE1m1ahWBgYGEhYWpa8gWZH3S3bx5k9mzZ2e5oHq6F+H7sUhRRIGYN2+eotPplHv37qnHVq5cqZiamirXrl3LNm+lSpWU0aNHZ5smLCxMAZQTJ07kSX3T/Zt6Pys4OFgBlOPHjxd4fW7cuKEASnBwsN5xLy8vpW3btup+ft3HoiglJUX9/6BBg5Q6deoYle/KlSsKoISEhGRKk5/3Mbf1flZycrLi5uamDBs2rFDr89prrykDBw5UvL29lY4dO+p97UX6fixK5FFqAdm7dy/t2rXD0dFRPdarVy9SU1Pz5HFOfsnLeq9fv54aNWrQuHHjAq/P06dPgczzT9rZ2RXq0j579+5VH49NmzZNPd6tWzc0Gg06nY4LFy7ky7lzOzl1YU9qnVfnNzU1xd7e3qDH8PlVn8OHD7N9+3YWLFjwr+og8pYExgJy/vz5TO/W7O3tcXFx4fz58znmX7duHVqtFmtrawIDAzlz5kyW6QIDAzE1NaVChQpMmjSJhISEQq13ulu3bnHgwAH69etXKPVxc3PD39+fefPm8fvvv/PgwQM2bdpEaGgoo0ePzpQ+r+/j8wQEBDB8+HAAFi1axJkzZ9i0aRPbt28HYOHChXrrlxY3BXUfDaUoCsnJydy7d4/Fixfz559/8uabbxZKXVJSUnjrrbeYNm0aLi4u2aYtavexpJN3jAUkJiYGe3v7TMcdHBy4f/9+tnm7dOlC06ZNqVixIpcvX2b
u3Lm0atWKU6dOUbVqVSCt5fPuu+/i5eWFpaUlBw4cYPHixZw7d45du3YVSr0z2rhxIykpKf86MP6b+mzdupXevXtTp04dIK3FsHz5cnr06KGmya/7mJ0lS5awf/9+Ll++zJAhQ4iKigKgXbt2WQbt4qAw7qMhVq1apb5Ptra2ZuPGjTRv3rxQ6rJixQoePXrEuHHjnpumqN7Hkk4CYzHwySefqP9v3bo1/v7+1KxZk8WLF7NixQoAGjRoQIMGDdR0bdu2xcXFhbfeeovjx4/TpEmTAq93RuvWrePll18utNaPoigMGTKEP//8k/Xr1+Pi4sIPP/zAO++8g4ODg9rLrzDuo7W1NWvWrMHLy4uff/4ZSPuFGBwcnGltzuKiqH4/vvLKK9SvX5+7d+8SEhJCr1692LZt27/qEJYbt2/f5r333mPNmjWYm5s/N11RvY8lnTxKLSAODg7ExcVlOh4TE6P3vswQLi4utGrVSv0l+jy9evUCyDFddvKi3pcuXeL48eP0798/1/X4t/XZvXs3ISEhbN68mb59++Lj48PcuXMZOHBgpt6+z8qL+5iTli1b0qhRI3W/S5cuJW7l9oK4jzlxcnKiUaNGdOjQgVWrVhEQEMCkSZMKvB7vvfce9erVo3Xr1sTGxhIbG0tycjLJycnq/5+nKNzHkk4CYwGpWbNmpndgcXFx3Lhx41+P68tPeVHv9evXY2Jikidjr3Jbn99//x1TU9NMY8waNGjA9evXefz48b+u27/x9ddfc/z4cXV/3bp1HDlypBBr9GJ4+eWXuXjxYoGf9/z580RERODg4KBuP/30E/v27cPBwYH9+/cXeJ3EPyQwFpCAgAD2799PbGyseiwkJAQTExP8/f2NKuv69escPnw4x96d3377LcC/6gWaF/XesGEDPj4+OXYwyM/6VKpUiZSUFH799Ve94z///DNly5bFysrquXnz4j5mJyoqirFjxwJpLcVatWqRmprKoEGDCj1g56X8vo+5cfjwYfU9fUFatmwZYWFhettLL71Es2bNCAsLy/YRaVG8j9nJ7QQIiqKwYMECKlasiKWlJc2bN+fo0aP5XNt/Ti4KwP379xUXFxfF29tb2bdvn/LVV18p9vb2mcYntm3bVqlWrZq6v379eqVfv37K2rVrlQMHDihffvmlUq1aNcXBwUG5fPmymq5///7KzJkzlR07dij79u1TJk+erJibmyuvvPJKodQ73f/+9z8FUL788st/VY9/W5/4+HilYsWKSvXq1ZVvvvlG2b9/v/Luu+8qJiYmygcffKCmy6/7+DypqamKr6+vAigODg7KjRs3lMjISMXExEQBlFGjRuXLeRVFUR49eqSEhIQoISEhio+Pj+Lm5qbu3759W1GUrD/XEydOKCEhIcqKFSsUQJkwYYISEhKihIeHq2ny8z7mpt67du1SevXqpaxZs0YJCwtTtmzZovTo0UMBlA0bNhR4fbKS1TjGgv5+zA/bt29XKlSooPTo0UPx9PQ0eJzn/PnzFXNzc+Wjjz5S9u/fr3Tr1k2xsbFRLl26lM81VhQJjAXo999/V3x9fRVLS0ulbNmyysSJE5WkpCS9NN7e3kqlSpXU/cjISMXHx0dxcnJSSpUqpTg5OSm9evVSzp8/r5dv3rx5Sp06dRRra2vFzMxMcXd3V2bNmpWp/IKqd7qJEycqWq1WiYmJ+df1+Lf1+fPPP5VevXoprq6uipWVlVKnTh1l2bJlSnJyspomP+9jVpYvX64ACqCsXr1aPT5hwgQFUDQajRIaGpov504foJ/VFhYWpihK1vdx0KBBWebx9vZW0+TnfcxNvc+dO6d07dpVcXV1VczNzRVXV1elQ4cOesG8IOuTlawCY0F/P+aH3EyAkJCQoNja2ipBQUHqsaSkJKVSpUrKyJEj86WeGclCxUIIIQrE4MGDOXnyJL/99lu26Q4cOICvry+nTp2ifv366vHx48ezdetWrl69mq/1lOEaQgjxgktMTDR
qBiBFUTINJdJqtVnO35wb6R3snu1QV6tWLf7++28SEhKwtLTMk3NlRQKjEEK8wBITE3G2dCCexJwT/z9ra2sePnyod2zmzJnMmjUrT+oUExODVqvFwsJC77iDgwOKohATEyOBUQghRP548uQJ8SQy26QjFpjlmD6Rp8x8uJuoqChsbW3V43nVWiwKJDAKIYTASqPFUpNzYDT5/7UnbG1t9QJjXnJwcCApKYnExES9VmNMTAwajQYHB4d8OW86GceYT5YuXUrFihUxNTXllVdeyZMyly1bxp49e/KkLCHEvzd48OBcLUxcFJlqTAze8lv6u8U//vhD7/j58+fVcY35SQJjPvjzzz+ZMGEC/fv359ChQ3z44Yd5Uq4ERiFEfjHRaAze8luLFi2wtbUlJCREPfb06VO2bt1KYGBgvp9fHqXmgz/++ANFURg2bFihzKphqPzu2SWEKFzG/IynBb2c20omGBcYHz9+rP5B/9dffxEfH8/mzZsB8Pb2pkyZMvj6+vLXX3+p0/NZWFgQFBTErFmzKFOmDJ6enqxYsYJ79+4xceJEo86fG9JizGODBw+mc+fOAFSrVg2NRsNnn33GW2+9hYeHB1ZWVlSuXJkRI0Zkmgz7u+++o1GjRlhbW2Nvb0+jRo3Ub6jKlSvz119/8dlnn6kL265evVrNu3r1aurVq4eFhQXly5dn2rRppKSk6H1do9EQGRmJn58fOp2uUCZPFqK4OXv2LIGBgZQuXRorKys8PDyyfQp05swZ2rdvj06nw87OjldffZW///5b/frrr79O69at1f27d+9iYmKiN8Xbw4cPMTMz02sxnTt3jq5du2JnZ4dOp6Njx45cunRJ79wajYYFCxYwefJkypUrR9myZQ2+zlImpgZvxrh9+zY9e/akZ8+ehIeHExUVpe6fPXsWSFub8tmJ0ydPnszMmTNZvHgxgYGBREdHs2/fvgJpbEiLMY/NmDGD2rVrM3nyZLZu3YqLiwvVqlXjvffeY+7cuZQpU4aoqCjmzp3LK6+8QlhYGJC2AsWrr75K3759mT9/Pqmpqfzyyy/ExMQAsG3bNgIDA2nVqpW6GkS1atUA+Oijj3j33XcZN24cS5Ys4dy5c2pgfHZl8H79+jF8+HCmTp2a7fygQog0nTt3xtnZmVWrVmFnZ8fFixeJjo7OMm1UVBReXl5Uq1aNtWvXkpiYyLRp0/D29ubXX3/FxsYGLy8v1q1bp3YsiYiIQKvVcurUKR48eICNjQ1HjhwhOTkZLy8vAC5fvkyLFi2oW7cuq1evxsTEhLlz5+Lr68sff/yh1yP0448/plmzZqxatSrbVTqeZaIxMbDFaFx7qnLlyuQ0j0x4eHimYxqNhqCgIIKCgow6X57I97l1XkDbtm1TAOXKlStZfv3p06fK4cOHFUD5448/FEVRlJCQEAVQ4uPjn1tupUqVMs0JGh8fr1hbW+tNnaQoivKf//xHsbS0VO7evasoiqIEBwcrgLJgwYJ/cWVCvFju3LmjAMp3332X5defneJs3Lhxik6nU+7du6ceO3funKLRaJRPPvlEURRFuXz5sgKo09GNHTtW6du3r1K6dGll7969iqIoyrRp0xR3d3e1jIEDBypVq1ZVEhIS1GO3b99WrK2tlc8++0w9Bii1a9dWUlNTDb7GuLg4BVC+sHpNWat7PcftC6vXFECJi4sz+BzFjTxKLSDffPMNDRo0wNraGjMzM1q1agXAhQsXAKhXrx6mpqb069ePnTt3ZrnmYFaOHDnCw4cP6dmzp7qeW3JyMu3atSMhISHT1EsdO3bM2wsTogQrXbo0lSpVIigoiK+//vq5LcV0hw4dom3btnprg9asWZOXXnqJw4cPA1ClShUqVKhAREQEABEREfj4+NC6dWsOHjyoHktvLQKEhobSpUsXSpUqpf6MOzg40KBBA06cOKFXh4CAgFwtcJ3eYjRkK+lK/hUWAdu2bWPgwIE0adKETZs2cfToUbZt2wakzTo
B4O7uzq5du4iLi6Nbt26UKVOGLl266L2byMrdu3cBaNiwIWZmZupWo0YNIO3RTkbOzs55fXlClFgajYbQ0FBq1arF6NGjcXNzo1GjRmpQe1ZMTEyWP2POzs7cv39f3ff29iYiIoL4+Hh++eUXvLy88PLyIiIigqSkJI4fP64XGO/evcuyZcv0fsbNzMw4dOhQnv2MF6VeqYVN3jEWgJCQEOrXr8/KlSvVY+l/GWbUoUMHOnToQHx8PN9//z3jxo1jyJAh/Pjjj88tO/0v061bt+Lm5pbp61WqVNHbz81fkkK8yNzd3QkJCeHp06ccOXKEqVOn0rlzZ65du5YpraOjI7dv3850/NatW7i7u6v7Xl5ejB8/nvDwcJycnKhZsyaPHj1i8uTJhIWFkZSUpNdBx9HRkY4dOzJq1KhMZdvY2Ojt5/ZnPL/eMRZHEhgLQEJCAubm5nrH1q1b99z0tra29OrVi2PHjrFhwwb1uLm5udrCTNe8eXOsrKyIjo6mW7dueVtxIYTKzMwMb29vpkyZQpcuXbh+/XqmNK1ateKLL74gJiZGnZ3ljz/+4Ndff2Xo0KFqOi8vLx49esRHH32ktgzr16+PpaUlCxYswM3NjcqVK6vp27Vrx2+//UaDBg0wNTWuV6ihSpmYUkqTc9mllPw5f1EigbEA+Pn5MXr0aD744AOaN2/Onj17MrUCV65cSWRkJB06dMDFxYUrV66wdu1avVXpa9WqxYEDB/jhhx9wcHCgSpUqlC5dmvfff593332X6OhofHx8MDU15fLly+zYsYMtW7ZI71MhcunXX39lwoQJ9O7dm2rVqhEXF8f8+fOpXLmy2is8o3HjxhEcHIy/vz/Tpk0jMTGR6dOnU7FiRQYPHqymq1mzJmXLluXgwYN88sknAJiamtKyZUv27t1L//799cqdPXs2jRs3pn379gwfPhxnZ2du3rzJwYMHad26NX379v3X12rorDamL0CLseRfYRHw5ptvMmHCBJYvX0737t2Jiopi/fr1emnq1avH3bt3GT9+PP7+/sycOZO+ffuyYsUKNc28efOoUKECPXr0oHHjxuzcuROACRMmEBwcTFhYGD169KBnz5588cUXNG7cOFNLVQhhuHLlylGuXDnmz59PQEAAb775Jm5uboSGhmbZcnNzc+PgwYM4ODjQv39/hg8fzksvvUR4eHimR57pLcWM7xK9vb0zHQOoXr06x48fp3Tp0owaNYr27dszZcoUHj16RL169fLkWtMH+Oe8lfzXMbJQsSiWtm/fzvXr17N855JbsbGxLFu2jF69elG7dm2j8hq6AKtGo2HRokUFMntHuvXr1zNz5kyuXr1KnTp1OH36dJ6Ue/XqVVavXs3w4cNxdXXNkzJFwYuPj8fOzo4Qh5FYmeS8Qsbj1CR6xvyHuLi4fJtEvLBJi1EUS9u3b9drTeeF2NhYZs+eze+//56n5WYUGRmZ6TFZfnr48CFDhw6lVatWhIeH88033+RZ2VevXmX27NlZvmsTxY/GwKEamhdguIa8YxQlmqIoPHnypMisFdesWbMCPd/Vq1dJSkritddeo2XLlgV6bmMlJSVhZmaGiUnJ/8VbFBncK/UFCIwl/wpFsZTd/JSDBw/m66+/5uzZs+q8sekdG9KXAdqzZw8vvfQSWq1WfRcbGRlJ27Zt1Tks+/Xrp3atv3r1qjq0pWfPnmq5V69eBdJ+aU+fPp2qVaui1WqpUKGCXmeKdOHh4TRo0ACdTkeTJk34+eef9b6u0WhYvHixuu/j40OnTp3YvHkzHh4eWFtb07Zt20xzYEZHR9OpUyesrKxwc3Nj6dKlvPPOO3o9F581a9YsPD09AfD19UWj0agrrE+ZMgVPT0+sra0pX748ffv25caNG5nK2L17Ny1btsTKygoHBwd8fHw4deoU4eHhtGnTBoDGjRur9yvdX3/9xauvvqrO69m+fXvOnDmjV3blypV56623+PDDD6lUqRKWlpZ6Y/1EwZJ
xjP+QFqMokrKbn3LGjBncuXOH8+fPq8NeypQpo+a9fv06b7/9ttobsGLFikRGRuLj40NgYCAbN27k0aNHTJ8+na5duxIZGYmLiwtbt26le/fuzJs3T/2l7+LiAkCPHj04cOAAU6dOpVmzZty5c4etW7fq1fnmzZu8/fbbTJkyBTs7O4KCgujWrRuXLl3CzOz5C8CePn2aRYsWsWDBAlJSUhg/fjwDBgwgMjISSGv1du3alVu3brFy5Urs7OxYtGgRf/31V7atqzfeeINq1aoxcOBAPvvsMxo2bEiFChWAtImdp06diqurK3fu3GHJkiV4e3vz+++/U6pU2q+FjRs30rdvX7p27cr69esxNzfnp59+4tq1a3h5efHZZ58xevRogoOD1fXzAB48eICPjw8mJiZ8/vnnWFhYMHfuXLy8vPj111/1xttu2bKFGjVq8PHHH2NqaopOp8vhO0PkFzOTUpiZ5BwSzEjJMU2xV7gz0gmRWU7zUypK5jkqMx4HlKNHj+od9/LyUlq0aKE3h+TZs2cVjUaj7N69W1EURbly5YoCKCEhIXp5Q0NDFUBZv359tvXRaDTKb7/9ph4LCwtTAOXQoUPqMUBZtGiRuu/t7a3odDrl9u3b6rH0eW2joqIURVGU3bt3K4ASERGhpnnw4IFiZ2enVKpU6bl1UhRFOXXqlAIoYWFhz02TnJysREdHK4Cyb98+RVEUJTU1ValQoYLSvn375+ZLv74TJ07oHf/4448VjUaj/P777+qxe/fuKTqdThk/frx6rFKlSkrp0qWVhw8fZnsNIn+lz5W613mCEuEyNcdtr/MEmStViIJm7PyUWeVv2rSpuv/48WN++uknevbsqS5vk5ycjLu7O25ubpnmmnzWjz/+iJWVFX369Mk2naurK3Xq1FH303u25lT/+vXr67V4n8134sQJ7O3t9WZCsba2xtfXN9tys7N3715atGiBnZ0dpUqVUluS6XP3/vHHH0RHR+sNSjfUoUOHqFu3LrVq1VKPOTo64ufnp84Xms7Hx0daiUWEzJX6j5J/haLYMXZ+ymc9O1dkTEwMKSkpjBs3LtNck3///XemuSafde/ePVxcXHKcasve3l5vP30M6bOzFRmb78aNG3qBM50xa+1ldOLECbp06YKrqyvffPMNkZGRHD16VO+c9+7dA8jVMAxD5wtNPyaKBo2picFbSSfvGEWRlN38lNbW1tnmfTaA2dvbo9FomDp1Kq+88kqm9E5OTtmWV7p0aW7cuIGiKIUy16yLiwt37tzJdDyrOTkNsW3bNuzs7Ni0aZP6jvKvv/7SS1O6dGmAXA3FcHR05I8//sh0/NatW3qrToDM3VuUaEw1aAzoEfwifGYlP/SLYi3j/JTx8fHqL+qs5o19Hp1OR/PmzTl37hyNGjXKtKX37HxeC69du3Y8fvyYTZs25d2FGaFx48bExsbqtZgfPnyY7eTy2UlISMDMzEzvF9yzc/d6eHhQoUIFgoODn1vO8+5Xq1atOHPmjF5wjImJYf/+/epya6Lo0ZQyMXgr6aTFKIocQ+anrFWrFl999RUbNmygRo0aODk5ZTt0YdGiRbRt25bevXvTp08fHBwciI6O5ocffmDIkCH4+PhQrlw57O3t2bBhA1WqVEGr1VKvXj3atWtHYGAgQ4cO5dKlSzRt2pT79++zefNmNm7cmO/3IyAggIYNG9KvXz/mz5+Pvb09H374ITY2Nrka8+fn58eyZcsYM2YM3bp1IzIyMtPA//RhJX379qVHjx4MHDgQrVZLZGQkjRs3plOnTri7u2NqaspXX31FqVKlKFWqFI0aNWLIkCEsXbqUjh07MmfOHLVXaqlSpXjnnXfy6K6IvKYxL4WJac4hQZNS8sNGyQ/9otgxZH7K119/nZ49ezJmzBgaN26sjs97nhYtWnD48GEePnzIkCFDCAwM5P3338fKyorq1asDYGJiQnBwMFeuXMHX15fGjRurLdQtW7bw9ttvs3LlSgI
CAhg/fnyOj3TzikajYceOHbz00ksMHz6cN998k44dO9KuXTvs7OyMLi8wMJCFCxeyY8cOunTpQkREBLt27cqUrnfv3uzYsYNr167Rp08f+vbty+HDh9WOOk5OTnz22WfqRNaNGzcG0pZBCg8PV+vbv39/HBwciIiIyHJpNFE0SIvxHzJXqhDF0JMnT6hduzatW7fO9nGnEDlJnys17KXZWJta5Jj+YUoibX6ZWaLnSi35bWIhSoAvvviC1NRUPDw8iImJ4T//+Q9Xr17l22+/LeyqiRJCU8qwHqcyV6oQokiwsLBgwYIF6hR1L730Ert376ZRo0aFWzFRYmhMDXtMqnkB3sBJYBSiGBg4cCADBw4s7GqIEszQMYqaF2BeGAmMQgghMNGaYlIq55BgksUCzSWNBEYhhBAG9ziVR6lCCCFeCAa/Y5RHqUIIIV4EBr9jTJXAKIQQ4gVg8KNUaTEKIYR4EUhg/IcERiGEEJiYl8LE3IBeqZqSHzZK/hUKIYTIkcZUg8Y05yWlDElT3ElgFEIIgaaUxrBHqakSGIUQQrwADH7HKL1ShRBCvBAMHK6BIWmKOQmMQgghDB/gnyKBUQghxAvA4AH+0mIUQgjxIjB4uIZS8sNGyb9CIYQQOTPVpG2GpCvhJDAKIYRAY2KCxsSAR6kGpCnuJDAKIYRIC4yGvGOUwCiEEOJFoDHRGNhilEepQgghXgTyjlFV8tvEQgghcmRiVsrgzRjnz5/Hz88PnU5HuXLlePfdd3ny5EmO+SpXroxGo8m0JSYm5vYSDSaBUbyQKleuzFtvvaXub9++nRUrVhhdztWrV9FoNGzevDkvqwfA4MGDqVu3bo7pNBoNixcv1jv27rvv4uLigomJCe+8885zr8/Qc4iSL73zjSGboWJiYmjbti1Pnjxh69atzJs3jy+++ILx48cblP/VV18lMjJSb9Nqtbm9RIPJo1TxQtq2bRsODg7q/vbt2zl58iSjRo0qxFrlTmRkJJUqVVL39+/fz6JFi1i6dClNmzbF1dWVmTNnZnl9M2bM4NGjRwVdZVEU5cOj1M8//5z4+Hi2bduGo6MjAMnJyYwaNYqpU6fi6uqabX5nZ2eaNWtm8PnyigRG8UJq0KBBYVchzzz7i+P8+fMAvP3225jk8Nd9tWrV8q1eonjRaAwcrqExvMW4d+9e2rVrpwZFgF69ejFixAhCQ0MZPHhwbqqa7+RRqihR0h8N7t27l7p162JhYcHLL7/M0aNH9dJlfJQ6ePBgvv76a86ePau+x8j4AxsZGYm/vz+2trbY2NjQtGlTfvjhB73yEhMTeeutt3BwcMDFxYWJEyeSnJycbV3Pnj1LYGAgpUuXxsrKCg8PDz788MNM6cLDw2nQoAE6nY4mTZrw888/630946NUHx8fxowZA4CpqSkajYbKlSs/9/qefZS6evVqNBoNp06dIiAgAJ1OR40aNVizZo3eORVF4f3336dcuXJYW1vTs2dP9u/fj0ajITw8PNvrFkVT+pRwhmwA8fHxeltSUlKmMs+fP0/NmjX1jtnb2+Pi4qL+AZeddevWodVqsba2JjAwkDNnzuTNxeZAAqMocW7cuMGoUaOYNGkSmzZtQqvV0r59e27fvp1l+hkzZhAYGEjVqlXV9xgzZswA4KeffsLHx4ekpCS+/PJLtmzZQteuXfn777/1ypg2bRomJiZs2rSJESNGsGTJEr788sts69m5c2diYmJYtWoVu3fvZuLEiZkea968eZO3335bvZbExES6devG06dPsyxzxYoVvPPOOwDqtezYseO51/c8/fv3x9/fn+3bt9OgQQMGDx7MuXPn1K8vX76cWbNmMXjwYLZu3Uq1atV44403si1TFG1pwzUM2wDc3Nyws7NTt/nz52cqMyYmBnt7+0zHHRwcuH//frb16dKlC59++in79+/ns88+4+LFi7Rq1YrLly/nyfVmRx6lihLn/v37hISE0LZtWwC8vb1xc3Nj6dKlWf7
wVqtWjTJlyvDXX39leiz57rvvUr16dQ4cOICpqSkA/v7+mcpo2rQpn3zyCQB+fn6EhYWxefNmRowYkWUd7969y5UrV/j444/p3LkzAG3atMnyWg4ePEidOnUA0Ol0tGnThmPHjtGqVatM6WvXrq2+b8x4Lc+7vud566231PeRLVq0YPfu3WzZsoXp06eTkpLCggULGDJkCAsWLFDvyd27d1m1apVB5YuiR2NWCo0BPU41yWlpoqKisLW1VY/ndaeY9J8ngNatW+Pv70/NmjVZvHhxrjrKGUNajKLEsbOzU4Ni+n67du04duyYUeU8fvyYo0ePMmjQIDUoPs+zwbJ27dpER0c/N33p0qWpVKkSQUFBfP31189N6+rqqgbF9HKBbMvOCxmvR6fTUalSJfWc0dHR3Lhxgy5duujl6dq1a77WSeQvjanGwEepaS1GW1tbvS2rwOjg4EBcXFym4zExMXrvHQ3h4uJCq1atMr1KyA8SGEWJU6ZMmUzHnJ2duXHjhlHlxMTEkJqammPPOSDT4yJzc/Nsx1tpNBpCQ0OpVasWo0ePxs3NjUaNGhEREZFjuUC+j+XK7nrS7+Oz97ls2bL5WieRv4x9lGqImjVrZnqXGBcXx40bNzK9eyxKJDCKEufOnTuZjt26dQsXFxejyrG3t8fExITr16/nVdX0uLu7ExISQkxMDOHh4Wi1Wjp37szDhw/z5Xx5Jf0+Pnufn/cOVxQTJiaGbwYKCAhg//79xMbGqsdCQkIwMTHJ8pVEdq5fv87hw4dp3LixUflyQwKjKHHi4uI4cOCA3v7+/ftp2rTpc/Nk1cLT6XQ0b96cNWvWkJKSkm/1NTMzw9vbmylTphAfH58vgTinFqwxKlSoQLly5dixY4fe8e3bt+dJ+aJwpE8inuNmRGAcMWIENjY2vPLKK4SGhhIcHMykSZMYMWKE3pMYX19fqlevru5v2LCB/v37s27dOsLCwli1ahVeXl6YmpoyYcKEPL3urEjnG1HiODo68vrrrzN79mzs7e1ZsGABiqKovTWzUqtWLb766is2bNhAjRo1cHJyonLlyixYsIC2bdvSrl07Ro0ahYODA//73/9wcnJi6NChua7jr7/+yoQJE+jduzfVqlUjLi6O+fPnU7ly5XwZW/i868sNU1NTgoKCeOedd3B2dqZNmzaEhYWxf/9+gBzHTooiytBZbYz4fB0cHPjxxx8ZM2YMr7zyCjY2NrzxxhvMnTtXL11KSore8KYqVapw/fp13nnnHWJjY7G3t6dt27a8//77VKlSxeDz55YERlHiuLi4sHDhQiZNmsSlS5eoU6cO+/btw9nZ+bl5Xn/9dY4fP86YMWO4d+8egwYNYvXq1bRq1Yrw8HCmT5/O4MGDMTU1pU6dOsyZM+df1bFcuXKUK1eO+fPnc+3aNezs7GjdujVr167NsaNPbjzv+nJrzJgxxMTEsGLFCj755BPatWvHokWL6N27N3Z2dnlXcVFgNGamaMxy/t4zJE1GtWrVUv9oep5nx742a9aMsLAwo86TlzSKoiiFdnYh8tjgwYM5efIkv/32W2FX5YUzY8YMlixZwr1797C0tCzs6ggDxcfHY2dnx1/rj2NrZZ1z+scPqdSvCXFxcXrDNUoSaTEKIYx27tw51q5dS4sWLTA3Nyc8PJzFixczcuRICYrFlKE9TmU9RiGEyIKVlRWRkZH85z//4cGDB5QvX55JkyYxa9aswq6ayKWM073llK6kk0epQgjxAkt/lBq1+X/Y6gx4lProIW6vNpRHqUIIIUo2jSZt4glD0pV0EhiFEEKAiSZtMyRdCSeBsYhITU3l+vXr2NjYGPRXmxCi5FIUhQcPHuDq6lpg40LlHeM/JDAWEdevX8fNza2wqyGEKEKioqKoUKFCgZxLeqX+QwJjEWFjYwPAmS/2Y2OpMyiPkpj1mnzZsaho3Iz2D+o9MPocVscsjEp/0fN/Rp+j7n0fo9LHHr9qVHpTS+OX0LFtUdG
o9Mn25kaf40GIceMzzRxtjD6HRQXjBugn3XqUc6IMUuKMSw9gUcm479vk+CdGnyM5xrg5alOeGP/zZ2puZlC6BwmPqPd2gPp7oUCYYNgkoSW/wVhwgVEGXmcv/fGpjaXOoEG2AKkmxv9gWuqM/EGzNb7Tsk5n3Dg2axsro89h+8S460ixNOyepjO1ykVgtDGuh16yrfGBEQP/aEpnbuD3UkYW1sbd26QHxrUgkp8a3+Iw9vs2OTnzavI5eWpklhTTXARGrWGBMV1BvlbRaDQGdr6RFmOemTFjRqbVyYUQQhQR0vlGVWCBMT8mRs5KQkKCzLwhhBDGksCoKrCnxYMHD6Zu3boArF69Go1Gw6lTpwgICECn01GjRg3WrFmTKd/u3btp2bIlVlZWODg44OPjw6lTp4C0iWc1Gg27d+/m1VdfxdbWlp49ewIQGxvLqFGjcHFxQavV8vLLLxMaGpqpbD8/P8qWLYutrS1Nmzbl+++/10sTGxvLsGHDKF++PBYWFri5udGnTx+9NNHR0QwYMAAnJycsLS3x8vIqkFWmhRAir2hMNGhMDdgkMOav/v374+/vz/bt22nQoAGDBw/m3Llz6tc3btxI586dKVu2LOvXr2fdunW0bNmSa9eu6ZUzfPhwqlWrxrZt25g4cSJPnjzBz8+PXbt2MXfuXL777jtq165Nx44dOXPmjJrvypUrdO7cmW+++YYtW7bQsmVLAgMD9WZ6Hz9+PLt27WLevHns27ePRYsWodX+8/4pJiaGVq1acfr0aZYvX86WLVvQ6XS0bds224Vbk5KSiI+P19uEEKKwpPdKNWQr6Qq1V+pbb73FqFGjAGjRogW7d+9my5YtTJ8+HUVRmDhxIv7+/mzbtk3NExgYmKmcLl26sHDhQnU/ODiY06dP88svv1C7dm0A2rdvz59//skHH3zApk2b1POnS01NpU2bNpw9e5YvvvgCHx8fAI4fP06/fv0YNGiQmjZji3HZsmXExsZy/PhxypYtC6Qtuunu7s7ixYv58MMPs7z2+fPnM3v2bKPulxBC5Ju0qW8MS1fCFWqL0d/fX/2/TqejUqVKREdHA/DHH38QHR1t0GKwHTt21NsPDQ3F09MTd3d3kpOT1c3Pz48TJ06o6aKjoxk0aBDly5enVKlSmJmZERoayoULF9Q0DRs2ZPXq1SxevDjLHrWhoaG0adMGR0dH9TympqZ4e3vrnetZQUFBxMXFqVtUVFSO1ymEEPlFWoz/KNQWo729vd6+ubk5iYmJANy7dw8AV1fXHMt5dgHau3fvcurUKczMMneNTl8ENjU1lS5duhAXF8f7779P9erV0el0vPfee/z9999q+uXLl+Po6MiSJUuYNGkSbm5uBAUFMXLkSPVcR48ezfJc2XU40mq1eo9khRCiUEnnG1WRHeBfunRpIG1GmJw8O67G0dGRevXqsWrVqufmuXjxIqdOnWL79u107dpVPZ6QkKCXzs7OjmXLlrFs2TLOnDnDxx9/zKhRo6hbty6tW7fG0dGRDh068MEHH2Q6hwQ+IURxIeMY/1FkA6OHhwcVKlQgODiYXr16GZW3Xbt27NmzB1dX1+e2ONMDoLn5P4Os//rrL3766Sfc3d2zzOPp6cnSpUtZtWoV586do3Xr1rRr1461a9dSq1YtdDrjBl8LIURRkd7r1JB0JV2RDYwajYbFixfTt29fevTowcCBA9FqtURGRtK4cWM6der03LwDBw5k5cqV+Pj4MHHiRNzd3YmNjeXUqVM8efKE+fPnU7NmTSpUqMCUKVNISUnh4cOHzJw5k/Lly+uV1bJlS7p160bdunUxNTVlzZo1mJub07p1ayCt1+q6devw9vZm7NixVKxYkTt37nDs2DFcXV0ZN25cvt4nIYTIE/IoVVVkAyNA7969sbKyYu7cufTp0wcLCwsaNmxIt27dss2n1Wo5cOAAs2bNYu7cudy4cQMnJycaNGig9oLVarVs3bqV0aNH07NnT9zc3Jg
+fToHDhzg5MmTalktW7ZkzZo1XLlyBRMTEzw9Pdm5cye1atUC0h75Hj16lOnTpzN58mTu3btH2bJladasWY71FEKIokImEf+HRlEU4yfDFHkufRXtGzevGbwqtvn9FKPPk2ppalT65FxMIvRrvY+MSl/v1/FGn8MkxbgO1cnHbxqV3rS5i1HpAe4sCTcqfdkJPkafI9gu517aGQ0KX5hzomdoKxo3ifiDUzeMSm9V2cGo9AC4GvmaIhe/u03uJhp3ilwsv3R94/N7qmf0IPERdT7oQVxcnMG/D3Ir/XfPvZ+vYGvAPLnxDx9Q+uUqBVK3wlKkW4xCCCEKhkZjWGvwBeh7U/IXEMk4FV1+i42NZdasWfz+++8Fcj4hhMgzJkZsJdwLcIkFJzY2ltmzZ0tgFEIUOxpTE4O3kk4epQohhEh7L2vIY1J5lFpyhIeH06BBA3Q6HU2aNNFb/UJRFBYvXoy7uztarZaqVauydOlSvfznz5+nT58+uLm5YWVlRe3atVmyZAmpqakAXL16lSpVqgDQs2dPdbDs1atXC+wahRAi19LnSjVkK+FeiBbjzZs3efvtt5kyZQp2dnYEBQXRrVs3Ll26hJmZGWPHjuXLL79k2rRpNG3alCNHjjB58mQsLS0ZMWIEANeuXcPDw4P+/ftjY2PD6dOnmTlzpjr+0cXFha1bt9K9e3fmzZtHmzZtAHBxybp3Y1JSEklJ/ywZLqtrCCEKk6JJ2wxJV9K9EIHx/v37HDx4kDp16gBpE5a3adOGY8eO4eLiwqeffsrnn3/O8OHDgbSZcx4/fszs2bMZPnw4JiYm+Pr64uvrC6S1MFu1asXjx4/59NNPmTlzJlqtlgYNGgBQo0YNmjVrlm2dZHUNIUSRIo9SVS/Eo1RXV1c1KALqUlTR0dHs378fgB49euitxNGuXTtu3ryprnqRmJjIzJkzqV69OlqtFjMzM6ZNm8aNGzd4+PCh0XWS1TWEEEWKPEpVvRAtxqxW8YC0YHf37l0URcHJySnLvFFRUVSqVInJkyfz3//+l5kzZ/Lyyy9jb2/Pjh07mDNnDomJiVhbWxtVJ1ldQwghiqYXIjBmx9HREY1Gw+HDh/UmFE/n4eEBQEhICG+++SaTJ09Wv7Z79+4Cq6cQQuQnxUSDYsAAf0PSFHcvfGBMf2947949Onfu/Nx0CQkJeoEzJSWFb7/9Vi9NxpaoEEIUK/KOUfXCB0Z3d3dGjx7Na6+9xqRJk2jatClPnz7lwoULhIWFsX37dgD8/Pz473//S+3atXFycmLFihV6vUoBypUrh729PRs2bKBKlSpotVrq1auXZUtUCCGKFAmMqhei801OPvnkE+bMmcO3335Lx44dGTBgABs3bsTb21tNs3z5cry9vRkzZgyvv/46np6eTJ06Va8cExMTgoODuXLlCr6+vjRu3NighZaFEKKwpQ/XMGQr6WR1jSIifYb721HXDZ6x3iTB+NU1km2NW11Dk4u/nYz9hlIw/jqMXV1DMTWuVprUXFy3Sapx58jFvTV9kGxUesU8///2TTGyD1lBfE+BcZ9F2jmMO4uJYtzPEoCiMaxe8fHxlCvnWqCra9y9fA1bm5zPFf8gHqeq5WV1DSGEECWcPEpVFYtHqT4+PnTq1KmwqyGEECWWotGoPVOz3V6AcYzFIjAKIYQQBaVEB8aEhITCroIQQhQPGiO2Es6owHjo0CE0Gg2XLl1Sj3Xu3BmNRsPZs2fVY3379qVjx44ATJkyBU9PT6ytrSlfvjx9+/blxo0beuX+9NNPeHl5YWdnh42NDZ6ennz99deZzr9582Y8PDywtrambdu2evW4evUqGo2G1atXM2zYMEqXLk2TJk2AtLlShw4dipOTE5aWlrRo0YKIiIhM5a9cuRIPDw+0Wi2VK1dmzpw56uoZAKtXr0aj0XDy5En8/f2xsrLCw8OD/fv3k5qayvTp03F2dsbZ2ZmgoCC9vEIIUaTJlHAqowJjkyZNsLCwUIN
Kamoqhw8f1jsGEBERgZeXFwC3b99m6tSp7N69m48//pirV6/i7e1NcnJa77r4+Hg6duyIra0tGzZsYPv27QwfPpzY2Fi9c58+fZpFixaxYMECVq9ezcWLFxkwYECmOgYFBaEoChs2bGDRokWkpKQQEBDAzp07WbhwISEhIVhbW+Pn56e39NTy5csZMWIE7du3Z+fOnQwePJhZs2bx7rvvZjrHwIED6dSpE9u2bcPV1ZXu3bszduxYoqKiWLNmDaNHj2bBggWZJgDIKCkpifj4eL1NCCEKjbQYVUb1StVqtTRp0oSIiAiGDBnCr7/+yqNHjxg6dCgHDx5k5MiRXLx4kevXr6uB8auvvlLzp6Sk0Lx5cypUqMCBAwfw9/fnwoULxMXFMX/+fDw9PYF/ZqPJKDY2llOnTlGmTBkAHj58yJAhQ4iOjqZChQpquvr16/Pll1+q+9999x3Hjx/n+++/p3379gC0b9+e6tWrM2/ePLZs2UJKSgrvv/8+ffr04ZNPPgHA39+fJ0+esGTJEoKCgihdurRa5pgxYxg5ciQA5cuXx9PTk5MnTxIZGamW/9133xESEkK/fv2yvJeyuoYQoijJr2Wnzp8/z5gxYzhy5Ag2NjYMHDiQOXPm5DjxiaIoLFy4kBUrVnDnzh3q16/P0qVLc1y5KC8Y/Y7Ry8tLbR1GRETQqFEjAgIC9I5ZWVnRqFEjAPbu3UuLFi2ws7OjVKlSahC7cOECANWqVcPW1paRI0eyadMm7ty5k+V569evrwZF0F8hI6P0R7jpDh06hK2trRoUAczMzOjevTuHDx8G0j64u3fv0rNnT728vXv35smTJxw/flzvuJ+fn/p/d3d3IHMwd3d3z3bFDFldQwhRpJhoDN8MFBMTQ9u2bXny5Albt25l3rx5fPHFF4wfPz7HvAsXLmTmzJmMGzeOXbt24eLigr+/P5cvX/43V2kQowOjt7c3ly9f5tq1a+oj09atW3Pz5k3+/PNPIiIiaNasGWZmZpw4cYIuXbrg6urKN998Q2RkJEePHgX+mU/UwcGBH374ARsbG1577TXKlSuHj48PZ86c0TtvditkZOTs7Ky3HxMTQ9myZTNdh7OzM/fv31fTZJU3fT89XVZ1Sa9HVvXLbs5UrVaLra2t3iaEEIUmHx6lfv7558THx7Nt2zbat2/P0KFD+fDDD/n888+znRUsMTGR+fPnM2HCBMaNG4evry/ffvstjo6OLF68ONeXaCijA2Pz5s0xMzMjIiKCQ4cO4eXlhaOjI3Xq1OHgwYNERETQunVrALZt24adnR2bNm2iS5cuNGvWjHLlymUqs0mTJuzdu5fY2Fh27tzJ7du3eeWVV3J1QZpnXgw7Ojpy+/btTOlu3bqFo6OjmgbIlO7WrVt6XxdCiJJK0WgM3gy1d+9e2rVrp/c7tFevXqSmphIaGvrcfEeOHCE+Pp5evXqpx8zNzenevTt79uzJ3QUawejAqNPpaNiwIStXruTevXu0atUKSGtJrlu3jitXrqjvFxMSEjAzM9MLVuvWrXtu2ZaWlgQGBjJy5EiuXLmSJ6tUtGrVivj4eL0PITk5mW3btql19/DwoEyZMoSEhOjl3bRpE+bm5mrvViGEKLGMbDE+23nw2UUVIO01Vc2aNfWO2dvb4+Liwvnz559blfSvPZu3Vq1a/P333/k+FC9X4xi9vLw4ePAgL730kvoI0MvLi/DwcMzMzGjevDmQ9i7u5s2bjBkzhh9//JE5c+ZkGoaxe/duunfvzjfffMPBgwfZuHEjy5cvp2XLllhYWPzLy0t759ikSRMGDBjAV199xe7du+nUqRM3btxQJwE3NTVlxowZbNiwgXfeeYfQ0FDef/99Fi5cyJgxY/Q63gghRMmkGLGBm5sbdnZ26jZ//vxMJcbExGR6zQRpr9CefUX1bD6tVpspBjg4OKAoivr6K7/kaq5Ub29vFi1apLYMAfX/jRo1wtLSEoDAwEAWLlzI8uXLCQ4OpmX
LluzatUvtsAJQvXp1TExMmDZtGrdv36Z06dL4+/tneZNzw9TUlD179jBx4kQmTZrEo0ePaNiwIaGhobz88stqujFjxmBmZsZHH33EihUrcHFxYdasWZlW0Mgv6XO5P3jwwOA8Jom5mEQcmUTcEEV2EvGHRk4ibiaTiBt+jqIziXj674GCXONB+f9/hqQDiIqK0usbodUa+Y1QlCmiSIiKijLmzzXZZJPtBdiioqLy/XdPXFycAig3bl1THiU8yHG7ceuaAihxcXE5ll2mTBllypQpmY67uroqkydPfm6+zz77TAGUhIQEveNffPGFotFolMePHxt/oUaQ1TWKCFdXV6KiorCxsdF7JxsfH4+bm1umv86ex9j0JeUcUqeic46iWKeCOEde1klRFB48eICrq6tB5eQFhRQMeXpjzBOemjVrZnqXGBcXx40bNzK9P3w2H8Aff/zBSy+9pB4/f/48FStWVJ9K5hcJjEWEiYmJ3kQFzzJ2SEduhoCUhHNInYrOOYpinQriHHlVJzs7O6PK+LeMfZRqiICAAObNm0dsbKz6rjEkJAQTExP8/f2fm69FixbY2toSEhKiBsanT5+ydetWAgMDDT5/bpXoScSFEEIYSEk1fDPQiBEjsLGx4ZVXXiE0NJTg4GAmTZrEiBEj9FrDvr6+VK9eXd23sLAgKCiIxYsX8/HHH3PgwAH69u3LvXv3mDhxYp5edlakxSiEEOL/X2wa0mI0nIODAz/++CNjxozhlVdewcbGhjfeeIO5c+fqpUtJSVHnz043efJkFEVh8eLF6pRw+/bto2rVqkbUIHckMBZxWq2WmTNnGtzjy9j0JeUcUqeic46iWKeCOEdB1Ck/KaSiGNCb15A0GdWqVYv9+/dnmyY8PDzTMY1GQ1BQEEFBQUadLy9oFKUA+wMLIYQoUuLj47GzsyPq5p/Y2toYkP4BbuVqEBcXV2KnspQWoxBCCFJJIdWAHqeGpCnuJDAKIYTgn+GThqQr2SQwCiGEyLd3jMWRDNcoAipXroxGo8m0GTKJ+vnz5/Hz80On01GuXDneffddnjx5UgC11jdgwABq1KiBTqfDwcEBLy+vbGfPz+j69ev06NEDGxsbHB0deeONN4iPj8/nGmdtxYoVdOrUiTJlyqDRaNi8ebPBeQv7s7h48SIjRoygfv36lCpVirp16xqUT1EUFixYoA6cbt68ubo8XEHbs2cP3t7elClTBq1WS9WqVRk/fjxxcXE55l21ahXu7u5YWFjw0ksvsWvXrgKocdZWr16d5c/0lClTss1XmJ+FYsS/kk5ajEXEq6++yoQJE/SO5dRTLX0R0Bo1arB161auXbvG+PHjefz4MZ9++ml+VjeTJ0+eMH78eGrUqEFiYiKrVq0iMDCQsLAwdRmyrDx9+lRdRHr9+vU8fvyYiRMn0q9fv0L5xbZmzRogbZ7f9P8boih8FmfPnmX37t00bdqU1NRUUlMN+8s+fUHYBQsWUK9ePT777DP8/f05ffp0gXSNz+j+/fs0bdqUt99+m9KlS/Pbb78xa9Ysfvvtt2z/0Pr2228ZNmwY06ZNo23btmzcuJFu3bpx6NChAlnx/Xm+//57vYH65cuXzzZ94X4WqRg2x2zJbzHKXKlFQKVKlZTRo0cbnW/evHmKTqdT7t27px5buXKlYmpqqly7di0vq2i05ORkxc3NTRk2bFi26davX69oNBrl/Pnz6rF9+/YpgHLs2LH8rmYmKSkpiqIoypUrVxRACQkJMShfUfgs0uuuKIoyaNAgpU6dOjnmSUhIUGxtbZWgoCD1WFJSklKpUiVl5MiR+VJPY33xxRcKkO19dHd3V/r27at3rHnz5kpAQEB+Vy9LwcHBCqDcuXPH4DyF9Vmkz5V65eYvyt3Hl3Pcrtz8xeC5UosreZRajOV2EdCCYGpqir29fY6PEvfu3Uu9evXw8PBQj/n5+eHo6FggC5I+y8Qkdz8Suf0s9u7dqz5mmzZtmnq8W7duaDQ
adDodFy5cyLe6F/aCsIZIX/bted9Lly9f5sKFC3rXANCnTx9+/PHHLNcJLIoK+7NInyvVkK2kk8BYRKxbtw6tVou1tTWBgYGcOXMmxzy5XQQ0vyiKQnJyMvfu3WPx4sX8+eefvPnmm9nmyeoaNBpNlpMPF2W5/SwCAgIYPnw4AIsWLeLMmTNs2rSJ7du3A2mP1jIu05Yf9YbCWxD2eVJSUkhMTOR///sf77//Pl26dKFy5cpZps3uGp48ecKVK1fyu7rPVadOHUxNTalatSrz588nJeX5QaWwPwtFUVCUVAM2eccoCkCXLl1o2rQpFStW5PLly8ydO5dWrVpx6tSpbN8r5HYR0PyyatUqhg0bBoC1tTUbN25UF61+nqJ2Dbn1b65jyZIl7N+/n8uXLzNkyBCioqIAaNeuHaNHj86P6qoMWRA2v1cyyEqlSpW4du0aAB06dGD9+vXPTZu+aO2z99/BwQGgUL6PXFxcmD17Nk2bNkWj0fDdd98xffp0rl279tx3zoX9WeTHJOLFlQTGAqQoit5fjBqNBlNTUz755BP1WOvWrfH396dmzZosXryYFStWFEZVn+t51wDwyiuvUL9+fe7evUtISAi9evVi27ZtBAQEFFZ1nyu76yho1tbWrFmzBi8vL37++WcgbWWF4OBgvSXIXiR79uzh0aNHnD17ljlz5tC5c2d++OGHQvuMjNW+fXu1UxmAv78/lpaWLF26lGnTpuHi4lKItXse6XyTTh6lFqCDBw9iZmambr6+vlmmc3FxoVWrVuovyedxcHDIsht7TEyM3ruuvJTdNTg5OdGoUSM6dOjAqlWrCAgIYNKkSdmWVxjXAIZ/Fob6t9fRsmVLGjVqpO536dIl22XI8oqDgwNJSUmZhgbFxMSg0WjUVldBq1evHs2bN+eNN95gx44dhIWFsW3btizTptfx2fuf3pLMz+8jY/Tq1YuUlBROnz6d5dcL+7OQ4Rr/kBZjAXr55Zc5ceKEum9jk/O8hNnJ7SKg/4Yx1/Dyyy+zd+/ebMurWbNmpvepiqLwxx9/4Ofn9+8qm42i9ll8/fXXHD9+XN1ft24dI0aMoEWLFv+qXjkp7AVhDVGvXj3MzMy4ePFill9Pv4bz58/rdeI6f/485ubmBT7kJLcK+7OQAf7/kBZjAbKxsaFRo0bqlvGHOKPr169z+PBhGjdunG15AQEB7N+/n9jYWPWYIYuA/huGXgPA4cOHc/ylFBAQwC+//MKff/6pHvvxxx+5d+9evi5Iasx1GOLffBZRUVGMHTsWSGsp1qpVi9TUVAYNGsTjx4//Vb1yknFB2HQFuSCsIY4dO8bTp0+f+71UtWpV3N3d9a4BYOPGjfj6+mJubl4Q1czRt99+i6mpKQ0aNMjy64X9WUiv1AwKbaCIUBQlbRxfv379lLVr1yoHDhxQvvzyS6VatWqKg4ODcvnyZTVdeHi4Ympqqnz99dfqsfv37ysuLi6Kt7e3sm/fPuWrr75S7O3tczUm8t/YtWuX0qtXL2XNmjVKWFiYsmXLFqVHjx4KoGzYsEFNd/XqVcXU1FSZPXu2euzJkydK3bp1FU9PT2Xnzp3Kxo0bFTc3N6Vjx44Feg3pTpw4oYSEhCgrVqxQAGXChAlKSEiIEh4erqbJy88iNTVV8fX1VQDFwcFBuXHjhhIZGamYmJgogDJq1CiD6/7o0SMlJCRECQkJUXx8fBQ3Nzd1//bt24qiKErbtm2VatWq6eWbP3++otVqlWXLlik//vij0qNHD8XGxka5dOmSwefOK926dVPmzp2r7Ny5U9m/f7+yZMkSpVy5ckq9evWUpKQkRVEUZejQoYqpqalevvTxsO+9954SFhamjBgxQilVqpRy5MiRAr8GRVEUf39/ZcGCBcru3buV3bt3K2+++aai0WiUd955R01TVD6L9HGM564fVKIf/pzjdu76wRI/jlECYyGLjIxUfHx8FCcnJ6VUqVKKk5OT0qtXL70B74qiKGFhYQq
gBAcH6x3//fffFV9fX8XS0lIpW7asMnHiRPUXSEE5d+6c0rVrV8XV1VUxNzdXXF1dlQ4dOugFE0X5Z9D8zJkz9Y5HR0cr3bt3V6ytrRV7e3tl6NChhfZDN2jQoPSZlPU2b29vNU1efhbLly9Xz7F69Wr1+IQJExRA0Wg0SmhoqEF1T7+/WW1hYWGKoiiKt7e3UqlSJb18qampyrx585QKFSooWq1Wadq0aaEFlPnz5yv169dXbGxsFJ1Op9SpU0eZMWOG3vdD+mf0rC+//FKpXr26Ym5urv6hVVjefvttpUaNGoqlpaWi1WoVT09P5eOPP1ZSU1PVNEXls0gPjGevHVD+fnA8x+3stQMlPjDKeoxCCPECS1+P8bdr+7Gx1eWY/kH8I+qWbyfrMQohhCjZUpUUUhUD1mM0IE1xJ4FRCCGEBMYMJDAKIYQgVUklVcl5KIYhaYo7CYxCCCFIVZ6QopgZlK6kk8AohBDi/1uMhjxKLfktRhngnw8GDx5s8OrpQoiiz5CfaY1Gw+LFi40uO7f58lr6O0ZDtpJOWoxCCJEHIiMjqVSpUmFXI9dSMbDzzQsw840ExhdMQkJCkZj/UoiSplmzZoVdhUySkpIwMzMzaBHrVFIMCnovQmCUR6kF4MyZM7Rv3x6dToednR2vvvoqf//9t/r1119/ndatW6v7d+/excTERG+u1IcPH2JmZqY3j+K5c+fo2rUrdnZ26HQ6OnbsyKVLl/TOrdFoWLBgAZMnT6ZcuXKULVs2H69UiJItPDycBg0aoNPpaNKkid4KOM8+ElUUhffff59y5cphbW1Nz5492b9/PxqNhvDwcL1yU1NTmTVrFs7Ozjg5OTFkyBAePXqklyY6OpoBAwbg5OSEpaWl3jJl6SpXrsxbb73Fhx9+SKVKlbC0tDR4PUpFSTF4K+kkMOazqKgovLy8uHfvHmvXruXzzz/nf//7H97e3jx48AAALy8vTpw4oS43ExERgVar5dSpU2qaI0eOkJycjJeXFwCXL1+mRYsW3L9/n9WrV7N+/Xru3LmDr68vSUlJenX4+OOPuXDhAqtWrWLt2rUFePVClBw3b97k7bffZtKkSWzatInExES6devG06dPs0y/fPlyZs2axeDBg9m6dSvVqlXjjTfeyDLtp59+yp9//snXX3/Ne++9x/r16/nggw/Ur8fExNCqVStOnz7N8uXL2bJlCzqdjrZt23L79m29srZs2cKuXbv4+OOP2bFjBzpdzrPZACSnPjF4K+nkUWo+W7p0KU+fPiU0NFRdF65BgwbUrl2b1atXM2bMGLy8vEhKSuLYsWN4e3sTERFBt27dCA0N5aeffqJDhw5ERETg7u6Os7MzALNnz8bR0ZEffvhBXfG7RYsWVK1alVWrVjFq1Ci1Do6OjmzduvWFXfRWiLxw//59Dh48SJ06dQDQ6XS0adOGY8eO0apVK720KSkpLFiwgCFDhrBgwQIgbbHiu3fvsmrVqkxlu7i4sG7dOgA6dOjA//73PzZv3qzmXbZsGbGxsRw/flx96uPr64u7uzuLFy/mww8/VMt6+vQpe/fuNTggpktVFAPHMZb8WUSlxZjPDh06RNu2bfUWS61ZsyYvvfQShw8fBqBKlSpUqFCBiIgIIK3F6OPjQ+vWrTl48KB6LL21CBAaGkqXLl0oVaoUycnJJCcn4+DgQIMGDfTWGYS0JZEkKArx77i6uqpBEaB27dpA2iPOZ0VHR3Pjxg26dOmid7xr165Zlv3s2qO1a9fWKzc0NJQ2bdrg6Oio/rybmpri7e2d6efdx8fH6KAIactOpRqwvQjLTkmLMZ/FxMRQv379TMednZ31nv2ntxTj4+P55Zdf8PLy4tGjR2zevJmkpCSOHz/OsGHD1PR3795l2bJlLFu2LFPZz64/l97KFELknr29vd5++s9Z+iuQjG7cuAFAmTJl9I4/7x1/VmVnfCVy9+5djh49iplZ5gH41apV09vP7c+7oe8PX4R3jBI
Y85mjo2OmdwAAt27dwt3dXd338vJi/PjxhIeH4+TkRM2aNXn06BGTJ08mLCyMpKQkvQ46jo6OdOzYUe+RabpnV6OX1qIQBcvFxQWAO3fu6B3P6neBIRwdHenQoYPee8d0Wq1Wbz+3P+8yV+o/JDDms1atWvHFF18QExODg4MDAH/88Qe//vorQ4cOVdOltxA/+ugj9ZFp/fr1sbS0ZMGCBbi5uVG5cmU1fbt27fjtt99o0KABpqamBXpNQojsVahQgXLlyrFjxw69x6fbt2/PVXnt2rVj7dq11KpVK1ePSQ0hgfEfEhjz2bhx4wgODsbf359p06aRmJjI9OnTqVixIoMHD1bT1axZk7Jly3Lw4EE++eQTAExNTWnZsiV79+6lf//+euXOnj2bxo0b0759e4YPH46zszM3b97k4MGDtG7dmr59+xbkZQohMjA1NSUoKIh33nkHZ2dn2rRpQ1hYGPv37wcwaFxhRuPHj2fdunV4e3szduxYKlasyJ07dzh27Biurq6MGzfuX9c5WXlKspLzH9nJSta9cEsS6XyTz9zc3Dh48CAODg7079+f4cOH89JLLxEeHp7pkWd6SzFjJxtvb+9MxwCqV6/O8ePHKV26NKNGjaJ9+/ZMmTKFR48eUa9evXy+KiFETsaMGcPMmTP56quv6NatG7///juLFi0CwM7OzqiySpcuzdGjR6lfvz6TJ0/G39+fcePGcfXqVZo2bZon9ZVxjP/QKMoL0PdWiCImPDycNm3acOLECRo1agTArFmz8Pf3p0WLFoVcO5FfZsyYwZIlS7h3716RmYEqPj4eOzs7tp99H52NRY7pHz1I5JU67xEXF4etrW0B1LDgyaNUIQpBw4YNiYyMpFatWuqx2bNnY21tLYGxhDh37hxr166lRYsWmJubEx4ezuLFixk5cmSRCYoZyTvGf0hgFKIQ2NraFsm5NUXesbKyIjIykv/85z88ePCA8uXLM2nSJGbNmlXYVcuSLFT8D3nHKF5YZ8+eJTAwkNKlS2NlZYWHh4feDCKRkZG0bdtWneO2X79+et3tr169ikaj4ZtvvmHEiBHY29tTtmxZPvroIwC+/fZbPDw8sLW1pXv37sTGxqp5w8PD0Wg0nDx5Evini/2kSZPQaDR682kmJiYyfvx4XF1dsbCwoH79+mzbtk3vWtKXRcpuLk9Im79z8eLFuLu7o9VqqVq1KkuXLtVLEx0dTa9evXB2dsbCwoIqVarkSeeOF02lSpU4cOAA9+/f5+nTp1y9epU5c+ZQqlTRbI8oBi459SK8Yyyan5AQBaBz5844OzuzatUq7OzsuHjxojrbSGRkJD4+PgQGBrJx40YePXrE9OnT6dq1K5GRkXrlTJs2jR49ehASEsL27duZMGECd+7cITw8nA8//JD4+HjGjBnDu+++yxdffJFlXSIjI2nevDljxoyhX79+wD8zq/Tv35/vv/+euXPnUrNmTdasWUOPHj3Yvn273swq6XN5TpkyBTs7O4KCgujWrRuXLl1SB4aPHTuWL7/8kmnTptG0aVOOHDnC5MmTsbS0ZMSIEQAMHDiQ69ev88knn+Ds7Mzff/+tBnBRcj1NSeZpSs49Tp+mJBdAbQqZIsQL6M6dOwqgfPfdd1l+3cvLS2nRooWSmpqqHjt79qyi0WiU3bt3K4qiKFeuXFEApVevXmqa5ORkxdnZWdHpdMrdu3fV4xMmTFDs7e3V/bCwMAVQTpw4oR4DlEWLFunV45dfflEA5fPPP9c73rx5c6Vhw4bq/qBBgxSNRqP89ttvmc5x6NAhRVEU5eLFi4pGo1FWrlypV9bkyZOVcuXKKSkpKYqiKIpOp1M++eSTLO+LKHni4uIUQFn7vwnK1gtTc9zW/m+CAihxcXGFXfV8I49SxQupdOnSVKpUiaCgIL7++mu9eSkfP37MTz/9RM+ePUlJSVHnpnR3d8fNzS3T3JQZ57k0NTWlatWq1K9fn9KlS6vH3d3diY2N5eHDh0b
V89ChQwD07NlT73jv3r05deqU3tJEOc3lmT6GrkePHuo1JScn065dO27evElUVBSQ1jFo8eLF/Oc//+HixYtG1VcUX4Y8RjW0g05xJ4FRvJA0Gg2hoaHUqlWL0aNH4+bmRqNGjYiIiCAmJoaUlBTGjRuHmZmZ3vb333+rASRdVvNcGjOvZnZiYmIwMzPTm4Qe0ubDVBRF771lTue8e/cuiqLg5OSkd03pgT39ujZu3Iivry/Tpk2jRo0a1KxZk61btxpVb1H8pCgpBm8lnQRG8cJyd3cnJCSEmJgYwsPD0Wq1dO7cGWtrazQaDdOmTePEiROZtunTpxdYHR0dHXn69CkxMTF6x2/duoVGo8kUDHMqS6PR8NNPP2V5XS+99BKQNs/nV199xd27dzl+/DgeHh707t2by5cv5+WliSImNTXV4C2/7dy5k5deegkLCwvc3d0JDg7OMU96Z7hnt9z0/pbON+KFZ2Zmhre3N1OmTKFLly7cunWL5s2bc+7cOebMmVOg9Xi2RZm+zl9ISAjDhw9Xj4eEhKi9Tw3l6+sLwL179+jcuXOO6U1MTGjcuDFz5szhu+++4+LFi1StWtXg84nipaiMYzx8+DDdunXjjTfeYNmyZRw4cIDXX38dGxsbXn311Rzzz5s3jzZt2qj7z84wZggJjOKF9OuvvzJhwgR69+5NtWrViIuLY/78+VSuXJlq1aqxaNEi2rZtS+/evenTpw8ODg5ER0fzww8/MGTIEHx8fPK8TrVq1WLHjh20bt0anU6Hh4cH9erVo3v37owfP56EhAQ8PDxYu3YtR44cYceOHUaV7+7uzujRo3nttdeYNGkSTZs25enTp1y4cIGwsDC2b99OXFwc7du357XXXsPDw4MnT56wfPly7O3tadiwYZ5fsyg6lNRUUlMNWHYqn1uMH3zwAU2bNuXzzz8HoE2bNly6dIn33nvPoMBYo0aNfz1GWAKjeCGVK1eOcuXKMX/+fK5du4adnR2tW7dm7dq1mJqa0qJFCw4fPszMmTMZMmQIT548oUKFCvj6+lK9evV8qdNnn33G2LFjCQgIICEhgbCwMHx8fFi7di1Tp05lwYIF3L9/n5o1a7J582aDWn3P+uSTT/Dw8GDlypW8//77WFtb4+HhoXbusbCwwNPTk+XLl/P3339jaWlJo0aNCA0NxcnJKa8vWRQhT1OeUiol5yWrDBnSkVtJSUmEhYXpjScG6NOnDxs2bODq1at6qwzlF5krVQghXmDpc6UuPzQQS2vzHNMnPHzCmNZriIqK0psrVavVZlob0li///47derUYe/evXTo0EE9/ueff+Lu7p7peEZXr16lSpUqODk5cf/+fUqXLk3Xrl1ZuHBhps5rOZHON0IIIUhNTTF4g7SVg+zs7NRt/vz5/7oO6Z3Mnu1Ulr6W7f3795+bV6vVMnLkSL788ksOHDjAxIkT2bRpE76+vjx9alwrVx6lCiGEMHqu1KxajFmJi4vjxo0bOZb7bzt2ubi4sGLFCnXf29ubOnXq0KlTJ7Zt20avXr0MLksCoxBCCIPHKKansbW1NWjZqZCQEIYNG5ZjunPnzqktw7i4OL2vpbckjX0kGhgYiE6n4+effzYqMMqjVCGEEP8/RtGQR6nG9Up94403UBQlx61mzZpUq1YNMzMzzp8/r1dG+n7NmjXz7HqzI4FRCCEEKalPSU7JeUtJzb9eqVqtljZt2rB582a94xs3bqRWrVpG90jdtWsXjx49onHjxkblk0epQggh/v9Ras5tpfyeEm7GjBn4+PgwatQoevXqRVhYGOvXr2fjxo166UqVKsWgQYNYtWoVABMmTMDExIRmzZphb2/P8ePHmT9/Po0aNeKVV14xqg4SGIUQQhg83Vt+TwnXqlUrtm7dyvTp01m1ahUVK1bkyy+/zDSRfkpKCikp/wTp2rVrs2LFCr744gseP35M+fLlef3115k9e7bRa2DKOEYhhHiBpY9jnLW3PRY6sxzTJz5
6yqyAfcTFxRnU+aY4khajEEKI/+9Yk/OjVEOmjSvuJDAKIYQgRUk1cLhG/q+uUdgkMAohhCAl5SnJKTm/WUtJSS6A2hQuCYxCCCFITVVITc05MBqSpriTwCiEEMLoKeFKMgmMQgghpMWYgQRGIYQQpCoGBsYXYISfBEYhhBBpgdGAoCeBUQghxAshOTkF02SNQelKOgmMQgghpMWYgQRGIYQQKKmGdax5ATqlSmAUQgiRPlwj50epMlxDCCHEC0GGa/xDAqMQQggJjBlIYBRCCIGiKBiyCuGLsFJhzmuMCCEKzODBg6lbt25hV0O8gJJTUkhONmBLkeEaQogCNGPGDB49elTY1RAvIBmu8Q8JjEIUIdWqVSvsKogXlLxj/Ic8ShUiD6Q/Ag0PD6dBgwbodDqaNGnCzz//rKZJTExk/PjxuLq6YmFhQf369dm2bVuW5aSLjY1l2LBhlC9fHgsLC9zc3OjTp49enujoaAYMGICTkxOWlpZ4eXnpnVcIQ6Smphq8lXQSGIXIIzdv3uTtt99m0qRJbNq0icTERLp168bTp08B6N+/PytXruTdd99l+/bt1K5dmx49evDdd989t8zx48eza9cu5s2bx759+1i0aBFarVb9ekxMDK1ateL06dMsX76cLVu2oNPpaNu2Lbdv3873axYlh/L/j1Jz2l6EzjfyKFWIPHL//n0OHjxInTp1ANDpdLRp04Zjx45ha2vL1q1b+fzzz3nzzTcB6NChA1evXmX27Nl06dIlyzKPHz9Ov379GDRokHosY4tx2bJlxMbGcvz4ccqWLQuAr68v7u7uLF68mA8//DC/LleUMPIo9R8SGIXII66urmpQBKhduzaQ9qjz3r17APTs2VMvT+/evRk3bhyPHj1Cp9NlKrNhw4asXr0aFxcXOnTokKnHamhoKG3atMHR0ZHk5GQATE1N8fb25sSJE3l6faJkS05JQWNAh1PplSqEMJi9vb3evrm5OZD2bjEmJgYzMzMcHR310jg7O6MoCrGxsVkGxuXLl+Po6MiSJUuYNGkSbm5uBAUFMXLkSADu3r3L0aNHMTMzy5RXOvIIY6QaOFfqC/CKUQKjEAXB0dGRp0+fEhMTg4ODg3r81q1baDSaTEE1nZ2dHcuWLWPZsmWcOXOGjz/+mFGjRlG3bl1at26No6MjHTp04IMPPsiUN+O7SCFyIo9S/yGdb4QoAK1atQIgJCRE73hISIjaizUnnp6eLF26FIBz584B0K5dO37//Xdq1apFo0aN9DZPT888vgpRkhnS8cbQsY7FnbQYhSgA9erVo3v37owfP56EhAQ8PDxYu3YtR44cYceOHc/N17JlS7p160bdunUxNTVlzZo1mJub07p1ayCt1+q6devw9vZm7NixVKxYkTt37nDs2DFcXV0ZN25cQV2iKOZSFQNbjBIYhRB5Ze3atUydOpUFCxZw//59atasyebNm+ncufNz87Rs2ZI1a9Zw5coVTExM8PT0ZOfOndSqVQuA0qVLc/ToUaZPn87kyZO5d+8eZcuWpVmzZnTr1q2gLk2UAEqqYWstvgCrTqFRXoRBKUIIIbIUHx+PnZ0d7eZUpJRFzm/XkhNT2T/9b+Li4rC1tS2AGhY8aTEKIYT4/16phqUr6SQwCiGEQElVUAx4x2hImuJOAqMQQghpMWYggVEIIYR0vslAAqMQQghZjzEDCYxFRGpqKtevX8fGxgaNRlPY1RFCFCJFUXjw4AGurq6YmBTMPCwpKaBJNixdSSeBsYi4fv06bm5uhV0NIUQREhUVRYUKFQrkXNL55h8SGIsIGxsbAC7++af6/7ySH9/Gym/38qFUKFXNPn/KfWjAn8K58NTBPM/L1KTkzy+exJM386XcpOv386Xc/KDk073Naw8SH9Fwyit5/rsgO9L55h8FFhgHDx7MyZMn+e233wrqlMVK+uNTGxubPB80my+BUfckH0qFUvk0YLiUIc+IcuGpbfEJjOa6R/lSbqJlUr6
Umx+KS2BMV5CvVRQlbTMkXUlXYIFxxowZPHqUPz+YQggh/h1ZXeMfBRYYC2ptuISEBCwtLQvkXEIIUWIYOFyDF+BRaoEtOzV48GB19fHVq1ej0Wg4deoUAQEB6HQ6atSowZo1azLl2717Ny1btsTKygoHBwd8fHw4deoUAOHh4Wg0Gnbv3s2rr76Kra2tukJ6bGwso0aNwsXFBa1Wy8svv0xoaGimsv38/Chbtiy2trY0bdqU77//Xi9NbGwsw4YNo3z58lhYWODm5kafPn300kRHRzNgwACcnJywtLTEy8uLn3/+Oc/unRBC5Lf0d4yGbCVdoa7H2L9/f/z9/dm+fTsNGjRg8ODB6jpzABs3bqRz586ULVuW9evXs27dOlq2bMm1a9f0yhk+fDjVqlVj27ZtTJw4kSdPnuDn58euXbuYO3cu3333HbVr16Zjx46cOXNGzXflyhU6d+7MN998w5YtW2jZsiWBgYGEh4eracaPH8+uXbuYN28e+/btY9GiRXoLwMbExNCqVStOnz7N8uXL2bJlCzqdjrZt23L79u3nXntSUhLx8fF6mxBCFJaUFMXgraQr1F6pb731FqNGjQKgRYsW7N69my1btjB9+nQURWHixIn4+/uzbds2NU9gYGCmcrp06cLChQvV/eDgYE6fPs0vv/xC7dq1AWjfvj1//vknH3zwAZs2bVLPny41NZU2bdpw9uxZvvjiC3x8fAA4fvw4/fr1Y9CgQWrajC3GZcuWERsby/HjxylbtiwAvr6+uLu7s3jxYj788MMsr33+/PnMnj3bqPslhBD5RWa++Uehthj9/f3V/+t0OipVqkR0dDQAf/zxB9HR0QwdOjTHcjp27Ki3HxoaiqenJ+7u7iQnJ6ubn58fJ06cUNNFR0czaNAgypcvT6lSpTAzMyM0NJQLFy6oaRo2bMjq1atZvHhxlj1qQ0NDadOmDY6Ojup5TE1N8fb21jvXs4KCgoiLi1O3qKioHK9TCCHyS6pi4KPUfG4w/vDDD/Tr149q1aqh0Wj0GjA5iYuL4/XXX8fR0REbGxteffVVbty4YXQdCrXFaG9vr7dvbm5OYmIiAPfupY2Tc3V1zbEcZ2dnvf27d+9y6tQpzMzMMqU1NTUF0lqIXbp0IS4ujvfff5/q1auj0+l47733+Pvvv9X0y5cvx9HRkSVLljBp0iTc3NwICgpi5MiR6rmOHj2a5bmy63Ck1Wr1HskKIURhKioD/L///nt++eUXvL29uX/fuDGyvXv35uzZs3z++edYWFgwbdo0AgICOHnyJKVKGR7uiuwA/9KlSwNpM8Lk5NmxPo6OjtSrV49Vq1Y9N8/Fixc5deoU27dvp2vXrurxhIQEvXR2dnYsW7aMZcuWcebMGT7++GNGjRpF3bp1ad26NY6OjnTo0IEPPvgg0zkk8AkhiovUVNAUgQH+ixYtYsmSJQAcOHDA4HyRkZHs27ePffv2qU8jPTw8qFWrFlu3bqVXr14Gl1VkA6OHhwcVKlQgODjYqAsCaNeuHXv27MHV1fW5Lc70AGhu/s8A7b/++ouffvoJd3f3LPN4enqydOlSVq1axblz52jdujXt2rVj7dq11KpVC51OZ1Q9hRCiqCgqA/xzOzfs3r17sbe3x8/PTz3m4eFB/fr12bNnT8kIjBqNhsWLF9O3b1969OjBwIED0Wq1REZG0rhxYzp16vTcvAMHDmTlypX4+PgwceJE3N3diY2N5dSpUzx58oT58+dTs2ZNKlSowJQpU0hJSeHhw4fMnDmT8uXL65XVsmVLunXrRt26dTE1NWXNmjWYm5vTunVrIK3X6rp16/D29mbs2LFUrFiRO3fucOzYMVxdXRk3bly+3ichhMgLTxNSSUk2YID/07Q0z/akL+zXQ+fPn8fDwyPTE8RatWpx/vx5o8oqsoER0p4XW1lZMXfuXPr06YOFhQUNGzakW7du2ebTarUcOHCAWbNmMXfuXG7cuIGTkxMNGjRQe8FqtVq
2bt3K6NGj6dmzJ25ubkyfPp0DBw5w8uRJtayWLVuyZs0arly5gomJCZ6enuzcuZNatWoBaY98jx49yvTp05k8eTL37t2jbNmyNGvWLMd6CiFEYTM3N6dcuXJc3mD4XLrW1taZFj2YOXMms2bNyuPaGS4mJiZTvxUABwcHo99VahTlRZj5ruiLj4/Hzs6OWzdv5vlcqfkxWWqK5mneFyrylQmm+VKu+b38mYf25w6f5XmZt27cyvMyARKT83a+2MfKEwY/CCYuLi7vfx9kITExkSdPDJ//WFGUTC2z57UY4+LiDOoZWrVqVb1XWwCVK1emU6dOfPrppznm9/Pzw9TUNNMkLW+99Vam0QY5KdItRiGEEPnPwsICCwuLfCk7JCSEYcOG5Zju3Llz1KxZM9fncXBwyHLYW0xMDI6OjkaVVajjGAtCxqno8ltsbCyzZs3i999/L5DzCSFEUffGG2+gKEqO278JigA1a9bkjz/+4NmHoOfPnze67BIfGAtSbGwss2fPlsAohBAFLCAggJiYGH788Uf12IULFzh16lSWM6ZlRx6lCiGEKDL++usvddawx48fc+nSJTZv3gzAq6++qqYrVaoUgwYNUserN2/enPbt2zN06FCWLFmiDvCvV68e3bt3N6oOL0yLMTw8nAYNGqDT6WjSpIne6heKorB48WLc3d3RarVUrVqVpUuX6uU/f/48ffr0wc3NDSsrK2rXrs2SJUtI/f/RrlevXqVKlSoA9OzZE41Gg0aj4erVqwV2jUIIUdyFhYXRs2dPevbsyZ07d/j+++/V/YxSUlJISUnRO7Zx40b8/PwYPnw4/fr1o0aNGuzZs8eoWW/gBWkx3rx5k7fffpspU6ZgZ2dHUFAQ3bp149KlS5iZmTF27Fi+/PJLpk2bRtOmTTly5AiTJ0/G0tKSESNGAHDt2jU8PDzo378/NjY2nD59mpkzZ6rjH11cXNi6dSvdu3dn3rx5tGnTBgAXF5cs65SUlERS0j892WR1DSGESOsXMnjw4BzTZTWgws7OjlWrVmU765khXojAeP/+fQ4ePEidOnWAtAnL27Rpw7Fjx3BxceHTTz/l888/Z/jw4UDazDmPHz9m9uzZDB8+HBMTE3x9ffH19QXSPpBWrVrx+PFjPv30U2bOnIlWq6VBgwYA1KhRg2bNmmVbJ1ldQwghiqYX4lGqq6urGhQBdSmq6Oho9u/fD0CPHj30VuJo164dN2/eVLv/JiYmMnPmTKpXr45Wq8XMzIxp06Zx48YNHj58aHSdZHUNIYQoml6IFmNWq3hAWrC7e/cuiqLg5OSUZd6oqCgqVarE5MmT+e9//8vMmTN5+eWXsbe3Z8eOHcyZM4fExESsra2NqlNhT58khBAiay9EYMyOo6MjGo2Gw4cPZ5p1AdImoYW0QapvvvkmkydPVr+2e/fuAqunEEKIgvHCB8b094b37t2jc+fOz02XkJCgFzhTUlL49ttv9dJkbIkKIYQonl74wOju7s7o0aN57bXXmDRpEk2bNuXp06dcuHCBsLAwtm/fDqTNw/ff//6X2rVr4+TkxIoVK/R6lQKUK1cOe3t7NmzYQJUqVdBqtdSrVy/LlqgQQoii6YXofJOTTz75hDlz5vDtt9/SsWNHBgwYwMaNG/H29lbTLF++HG9vb8aMGcPrr7+Op6cnU6dO1SvHxMSE4OBgrly5gq+vL40bNzZooWUhhBBFh6yuUUTk5+oaCvm85HYeill5Il/K/evj8Hwpt0ynWnle5i/fHMzzMgFuPjJu6R1DpSrF5/uruEhQnjI5dXuBra4h9EmLUQghhMigWARGHx8fOnXqVNjVEEII8QIoFoFRCCGEKCglOjAmJCQUdhWEEEIUM0YFxkOHDqHRaLh06ZJ6rHPnzmg0Gs6ePase69u3Lx07dgRgypQpeHp6Ym1tTfny5enbty83btzQK/enn37Cy8sLOzs7bGxs8PT05Ouvv850/s2bN+Ph4YG1tTVt27bVq8f
Vq1fRaDSsXr2aYcOGUbp0aZo0aQKkzZU6dOhQnJycsLS0pEWLFkRERGQqf+XKlXh4eKDVaqlcuTJz5sxRV88AWL16NRqNhpMnT+Lv74+VlRUeHh7s37+f1NRUpk+fjrOzM87OzgQFBenlFUIIUTwYFRibNGmChYWFGlRSU1M5fPiw3jGAiIgIvLy8ALh9+zZTp05l9+7dfPzxx1y9ehVvb2+Sk5OBtN6YHTt2xNbWlg0bNrB9+3aGDx9ObGys3rlPnz7NokWLWLBgAatXr+bixYsMGDAgUx2DgoJQFIUNGzawaNEiUlJSCAgIYOfOnSxcuJCQkBCsra3x8/PTW3pq+fLljBgxgvbt27Nz504GDx7MrFmzePfddzOdY+DAgXTq1Ilt27bh6upK9+7dGTt2LFFRUaxZs4bRo0ezYMGCTBMAZJSUlER8fLzeJoQQovAZNcBfq9XSpEkTIiIiGDJkCL/++iuPHj1i6NChHDx4kJEjR3Lx4kWuX7+uBsavvvpKzZ+SkkLz5s2pUKECBw4cwN/fnwsXLhAXF8f8+fPx9PQE/pmNJqPY2FhOnTpFmTJlAHj48CFDhgwhOjqaChUqqOnq16/Pl19+qe5/9913HD9+nO+//5727dsD0L59e6pXr868efPYsmULKSkpvP/++/Tp04dPPvkEAH9/f548ecKSJUsICgqidOnSapljxoxh5MiRAJQvXx5PT09OnjxJZGSkWv53331HSEgI/fr1y/JeyuoaQghRNBn9jtHLy0ttHUZERNCoUSMCAgL0jllZWdGoUSMA9u7dS4sWLbCzs6NUqVJqELtw4QIA1apVw9bWlpEjR7Jp0ybu3LmT5Xnr16+vBkXQXyEjo/RHuOkOHTqEra2tGhQBzMzM6N69O4cPHwbSFiG+e/dupoUwe/fuzZMnTzh+/LjecT8/P/X/7u7uQOZg7u7unu2KGbK6hhBCFE1GB0Zvb28uX77MtWvX1EemrVu35ubNm/z5559ERETQrFkzzMzMOHHiBF26dMHV1ZVvvvmGyMhIjh49Cvwzn6iDgwM//PADNjY2vPbaa5QrVw4fHx/OnDmjd97sVsjIyNnZWW8/JiaGsmXLZroOZ2dn7t+/r6bJKm/6fnq6rOqSXo+s6pfdnKlarRZbW1u9TQghROEzOjA2b94cMzMzIiIiOHToEF5eXjg6OlKnTh0OHjxIREQErVu3BmDbtm3Y2dmxadMmunTpQrNmzShXrlymMps0acLevXuJjY1l586d3L59m1deeSVXF6TRaPT2HR0duX37dqZ0t27dwtHRUU0DZEp369Ytva8LIYQo+YwOjDqdjoYNG7Jy5Uru3btHq1atgLSW5Lp167hy5Yr6fjEhIQEzMzO9YLVu3brnlm1paUlgYCAjR47kypUrebJKRatWrYiPjyc0NFQ9lpyczLZt29S6e3h4UKZMGUJCQvTybtq0CXNzc7V3qxBCiJIvV6treHl5sWjRIho2bKg+AvTy8uKzzz7DzMyM5s2bA2nv4pYtW8aYMWPo1q0bkZGRfPPNN3pl7d69m1WrVtGtWzcqVqzIzZs3Wb58OS1btsTCwuJfXl7aO8cmTZowYMAAFixYgLOzM8uXL+fGjRvqJOCmpqbMmDGDt99+m7JlyxIYGMjRo0dZuHAh77zzjl7HGyGEECVbrgKjt7c3ixYtUluGgPr/Ro0aYWlpCUBgYCALFy5k+fLlBAcH07JlS3bt2qV2WAGoXr06JiYmTJs2jdu3b1O6dGn8/f2ZP3/+v7kulampKXv27GHixIlMmjSJR48e0bBhQ0JDQ3n55ZfVdGPGjMHMzIyPPvqIFStW4OLiwqxZszKtoJFf0udyf/DgQd6XXYwmEX+Q+Chfyn2UmpRzolywePI4z8t8rDzJ8zIhbWLq/CCTiOe9xP//rGSNh8Ihq2sUEdHR0bi5uRV2NYQQRUhUVJTecDRRMCQwFhGpqalcv34dGxubTB2InhUfH4+bmxtRUVF51ps
1P8osbuUWp7rmV7nFqa75VW5RqKuiKDx48ABXV1dMTEr0zJ1FUq4epYq8Z2JiYvRfhvkxzCO/ho4Up3KLU13zq9ziVNf8Krew62pnZ5fn5xaGkT9FhBBCiAwkMAohhBAZSGAshrRaLTNnzkSr1RbpMotbucWprvlVbnGqa36VW5zqKvKHdL4RQgghMpAWoxBCCJGBBEYhhBAiAwmMQgghRAYSGEWhunjxIiNGjKB+/fqUKlWKunXrZplu1apVuLu7Y2FhwUsvvcSuXbsMKv/69ev06NEDGxsbHB0deeONN4iPj8/LS8hXDx8+pEKFCmg0Gk6ePJltWkVRWLBgARUrVsTS0pLmzZury7wVN4Z+XzyrJN0DUXgkMIpCdfbsWXbv3k316tXVxaef9e233zJs2DB69+7N3r17ad68Od26dcvxF97Tp09p3749Fy5cYP369fznP/9h37599OvXLz8uJV988MEHJCcnG5R24cKFzJw5k3HjxrFr1y5cXFzw9/fn8uXL+VzLvGfI90VWStI9EIVIEaIQpaSkqP8fNGiQUqdOnUxp3N3dlb59++oda968uRIQEJBt2evXr1c0Go1y/vx59di+ffsUQDl27Ni/rHn+O3funKLT6ZTPP/9cAZQTJ048N21CQoJia2urBAUFqceSkpKUSpUqKSNHjiyI6uYpQ74vnlXS7oEoPNJiLAb27t2LRqNBo9Ewbdo09Xi3bt3QaDTodDouXLhQiDXMvZzmgbx8+TIXLlygV69eesf79OnDjz/+SFLS81fN2Lt3L/Xq1cPDw0M95ufnh6OjI3v27Pl3FS8AY8aMYcSIEXr1f54jR44QHx+vd5/Mzc3p3r17sbjWZ+VmftCSdg9E4ZHAWAwEBAQwfPhwABYtWsSZM2fYtGkT27dvB9IeH2VcyqskOX/+PAA1a9bUO16rVi2ePHnClStXss37bD6NRkPNmjXVcouqzZs3c+bMGd577z2D0md3n/7++28SEhLyvI5FjdwDkVckMBYTS5YsoWrVqjx9+pQhQ4YwZswYANq1a8fo0aMLuXb5JyYmBgB7e3u94w4ODgDcv38/27zP5kvPm12+wvb48WPGjx/PvHnzDJ7EOiYmBq1Wm2lxbwcHBxRFUe9jSSb3QOQVCYzFhLW1NWvWrMHExISff/6Z27dvY2dnR3BwcI7LVIni5f/au/egqMo3DuDfBZaLLCK7XBIVVgyBFAdJvIAGivcVFa3MSUWdrDRLxchLpmJkFDKS5G1GE7w1golGFoMlKioKOV4Sb6hoYshFVwwFYeH5/eHs+XHYXW5CeHk+Mzvjec97l9l333Pe856IiAg4ODhg2rRprV0Vxl5KPDA+R/z8/NCrVy/hePTo0S/8S0y1M8OSkhJRuPbXv1wurzNt7XTatHWla003b95EdHQ0wsPDUVJSgvv376O0tBTAk0c3tP+uzcbGBo8fP0Z5ebkoXK1WQyKRCP34IuM+YM2FB8bnSHx8PDIzM4XjHTt24Pjx461Yo5anvV9U+57gpUuXYGpqChcXlzrT1k5HRLh8+bLOfahnRW5uLioqKqBSqWBjYwMbGxsEBQUBAAYOHIjBgwfrTadtz+XLl0Xhly5dEp7pe9FxH7DmwgPjc+LWrVuYM2cOgCczRQ8PD1RXVyMkJASPHj1q5dq1HBcXF3Tt2hWJiYmi8F27diEwMBCmpqYG044YMQJnz55FTk6OEPbHH3/g7t27GDlyZIvV+Wl4eXkhLS1N9Fm9ejUAYMOGDVi3bp3edL6+vmjbtq2onyorK7Fnz55ntq3NjfuANZtWflyENUB1dTUFBgYSALKxsaH8/HzKyMggIyMjAkCzZs1q7So22cOHDykxMZESExMpICCAOnXqJBwXFhYS0f+fR1y6dCmlpaXRhx9+SCYmJnT8+HEhnxs3bpCxsTGFh4cLYRUVFdS9e3fy9PSk5ORk2rVrF3Xq1IlUKtV/3s6nkZaWpvMc46BBg6hLly6ieF9//TW
ZmZlRTEwM/fHHHzR+/HiysrKia9eu/ddVfmoN+bt40fuAtR4eGJ8DsbGxBIAAUFxcnBA+f/58AkASiYRSU1NbsYZNl5ubK7St9ictLU2It2nTJnr11VfJ1NRUGOj05bNs2TJReF5eHo0bN45kMhm1a9eOpk+fTiUlJf9By5qPvoHR39+fnJ2dRfGqq6tp5cqV1LFjRzIzM6M+ffqIfjw8Txryd/Gi9wFrPfw+RsYYY6wGvsfIGGOM1cADI2OMMVYDD4yMMcZYDTwwMsYYYzXwwMgYY4zVwAMjY4y1kKlTp6J79+6tXQ3WSPy4BmOMtZBr167h4cOH6NGjR2tXhTUCD4yMMface/z4MaRSaZNe8Mx0cS8yxlgt2kughw4dQs+ePWFpaYnevXvj1KlTQpzy8nKEhobC0dER5ubm8PLyQlJSkt58tO7fv48ZM2agQ4cOMDc3R6dOnfDOO++I0uTl5WHSpEmwtbWFhYUF3njjDVG5AKBUKjF79mx8++23cHZ2hoWFxTP9jtHnjUlrV4Axxp5Fd+7cwSeffIKFCxfC2toaixYtQnBwMK5duwapVIp3330XKSkp+Oqrr+Du7o6tW7di/Pjx2Lt3L0aPHq03z9DQUPz222+IjIyEUqlEfn4+fvvtN+G8Wq1G//79IZPJEBsbC2tra8TGxmLQoEHIycmBvb29EPenn36Cq6srvvvuOxgbG8PS0rLF++Sl0Zr70THG2LMoJCSEJBIJnT9/XgjT7lmbnp5OZ8+eJQC0YcMGUbp+/fqRt7e3KJ9u3boJx926daPQ0FCD5S5dupSsra2poKBACCsvLycnJycKCwsTwpydnUmhUFBpaelTtZPpx5dSGWNMD0dHR3Tr1k04fu211wA8udSZnp4OAHjrrbdEaSZMmIDTp0/j4cOHevP09vZGXFwcVq1ahfPnz+ucT01NxcCBAyGXy6HRaKDRaGBsbAx/f39kZWWJ4gYEBPAssYXwwMgYY3q0a9dOdKx992d5eTnUajWkUinkcrkojoODA4gI9+/f15tnbGwsJk+ejOjoaHh6esLJyQnr168XzhcXF2Pv3r2QSqWiz7Zt23Dr1i2dsljL4HuMjDHWSHK5HJWVlVCr1bCxsRHCCwoKIJFIdAZVLWtra8TExCAmJgZ//fUXvvvuO8yaNQvdu3fHgAEDIJfLMXz4cHz55Zc6ac3MzETHEomkWdvE/o9njIwx1kj9+/cHACQmJorCExMThVWs9fH09MTq1asBABcvXgQADB48GBcuXICHhwd69eol+nh6ejZzK5ghPGNkjLFG6tGjB8aNG4fQ0FCUlZXBzc0N27dvx/Hjx7Fv3z6D6fz8/BAcHIzu3bvD2NgYW7duhampKQYMGADgyarVHTt2wN/fH3PmzIGTkxOKiopw8uRJODo6Yt68ef9VE19qPDAyxlgTbN++HYsXL0ZkZCTu3bsHd3d37N69G0FBQQbT+Pn5YevWrcjNzYWRkRE8PT2RnJwMDw8PAIBCocCJEyewZMkSLFiwAHfv3oW9vT369u2L4ODg/6pprLWXxbLnV+2l6M9yHVavXk379+//D2rUNP7+/qRSqVq7GvU6ffo0LVu2jB4+fCgK37JlCwGgoqKiZimnoKCAZDIZ/fXXX3XGW716NdX8GsvNzSUAwkcikZCjoyNNnDiRbty4IUo7ePBgioiIaJb6shcL32NkTfbFF19g586drV2NBomJicGvv/7a2tUwaN26dYiOjm7tatTrzJkzCA8Px6NHj0ThKpUKGRkZBhedNNZXX32FgICAJm/AvXLlSmRkZODo0aOIjIxERkYGRo4ciaqqKiHO4sWLsWrVKqjV6mapM3tx8KVU1mRdunRp7Sq8MLTPyLWGsrIyWFhYPFUednZ2sLOza5b6lJaWYvPmzdi2bVuT83B1dUXfvn0BAL6+vmjbti3Gjh2Ly5cvC309cOBA2NjYID4+HnPnzm2OqrMXBM8YmUHZ2dkYOXIkFAoF2rRpAzc
3N3z77bfCeX2v1Dl69Ch69uwJc3Nz9OjRAwcOHICXlxemTp2qk66ufSgBIDo6Gj4+PrC2toa9vT1GjRqFK1euNLodSqUSN2/exNq1ayGRSCCRSBAXFwcAqK6uRkREBJRKJczMzODu7o6NGzc2KN+AgACMGjUKu3fvhpubG2QyGQYNGoRr166J4t27dw/Tp08X9r709fXFkSNH9OallZeXh7fffhsODg4wNzdH586ddRZeXLx4EWPGjIG1tTUsLS2hUql0yq7txo0bQvtnzJgBhUKB3r17AwD279+PIUOGwN7eHm3btkWfPn2QkpIipI2Li8O0adMAPBkIJRIJlEqlcE4ikaC4uLhR7dZn9+7dAIARI0aIwh88eIApU6bAysoKdnZ2+Oyzz6DRaOrNDwCsrKwAAJWVlaLwt956C/Hx8Q3Kg708eGBkBgUFBUGtVmPz5s3Yv38/Pv30U4M7egBAfn4+hg8fDisrKyQkJCAsLAwzZ87E7du3deJq96EMCwtDQkICysvLERwcLPriysvLw+zZs7Fv3z5s2rQJ1dXV8PX1bfRmyUlJSXjllVfw5ptvIiMjAxkZGVCpVACAsLAwLF++HFOnTkVycjKGDh2KDz/8EN9//32D8j5z5gyioqIQGRmJuLg4XL16FZMmTRLOV1VVYcSIEUhOTsY333yDxMREyGQyDBkyROeHQE1TpkzBuXPnsGbNGqSkpCA8PFx0GfD69etCX8TFxWHnzp0oKipCYGAgHj9+XG+9Fy1aBCLCjz/+iKioKABAbm4ugoKCsG3bNvz000/w8/PDyJEjcejQIQBPLpcuWbIEAJCSkoKMjAydTbOftt0A8Pvvv8Pb2xvm5uai8OnTpyMpKQmRkZGIj4/HhQsXEBMTozeP6upqaDQaVFRU4OLFi1i+fDnc3d11fsj5+vrizJkzKCoqqq/L2MuktW9ysmdTUVERAaCff/7ZYJzaC1/CwsLI2tqaHjx4IISlp6cTAAoJCRGlq2sfSn00Gg09evSIZDIZbdy40WAdDHF2dqaPPvpIp41SqZQWLlwoCp84cSLZ2dmRRqOpM09/f3+ytLSkwsJCIUy7COXWrVtERLRv3z4CQCkpKUKciooKcnJyonHjxonyqrn4xtLSktasWWOw7ClTppCLiwuVlZUJYYWFhSSTyWjt2rUG02kXpwwfPrzOtlVVVVFlZSUNHTqUJk6cqNO+2otsaoc3tN36dO3aVef/Kjs7myQSCW3evFkI02g01Llz5zoX32g/Tk5OlJ2dbbA/fvnllzrrxF4uPGNkeikUCjg7O2PRokWIj49HXl5evWmysrIwcOBA4bIV8ORB6NrbZgF170OpdeLECQwZMgQKhQImJiZo06YNSktL67ycqt1fUvupy8mTJ1FZWal3v8uioiKhnKqqKoN5enl5ie6t1W5Heno62rZti2HDhglxpFIpxo0bh6NHjxqsm7e3N1atWoX169fj6tWrOudTU1MxevRomJiYCPWysbFBz549dfbU1Ec7Y64pLy8PISEh6NChA0xMTCCVSpGamtqky9dNbTfw5MpD7fuVWVlZICLRIwvGxsYYO3as3jy++eYbZGVlITMzE0lJSXB0dMTw4cN1rl7Y2toKZTKmxQMj00sikSA1NRUeHh746KOP0KlTJ/Tq1avOe0T6vtAAiF6Vo1XXPpQA8Pfff2Po0KGoqqrCxo0bcezYMWRlZcHe3l6Io0/tPSbrol2NWHvPSe2x9pJtly5dRHneuHGjwe1Qq9V62+/g4FDnJeFdu3YhMDAQn3/+OVxdXeHu7o49e/YI54uLixETE6PT3vT0dJ09NfWp3ebq6mqMHj0aR48exYoVK5CWloasrCyMGDGizv42pKntBp70Xe3tz/Lz8yGVSkXbr+lrh5aLiwt69eoFHx8fjB07Fj///DNu374t7DSjpS2nrKys3jaxlwevSmUGde3aFYmJiaisrMTx48exePFiBAUF4fbt25DJZDrx27dvr/d
eTWFhYaPLTklJQWlpKfbs2SMMPhqNpt4v1YbMlrS0M9nCwkJ06NBBCC8oKBCdT05OFt23c3R0bFQZ+tpfUFCgdyat1b59e/zwww/YtGkTTp06hYiICEyYMAGXL1+Gi4sL5HI5VCoVZs2apZO25ozdkNr7bF69ehWnT5/G3r17MWbMGCG8qQNGU9utTVt7E+727dsb3Ju0Iezs7GBra4vs7GxRuLYchULRoHzYy4FnjKxeUqkU/v7+WLhwIR48eIB//vlHbzwfHx8cPHgQ//77rxCWnp7epDeLl5WVQSKRiGZ9CQkJ9V4erb2/pJapqanOzKd3796QSqU6+10mJCTA3t4eXbt2BfBkT8uaeWpnhQ3Rv39/PHjwAKmpqUKYRqNBUlKSsN9mXYyMjODj44OIiAhoNBrhsurgwYNx/vx59OzZU6fNbm5uDa6flnYArNm2mzdv4tixY6J4tWfEhjxNu93c3JCbmysK8/HxAQDRYp+qqirs3bu3zry0CgoKUFxcLFw61dLO/pvSZ+zFxTNGpte5c+cwf/58TJgwAV26dEFJSQm+/vprKJVKg88vzps3D+vWrYNKpUJYWBju37+P8PBw2Nrawsiocb/BBg0aBACYNm0aPvjgA2RnZyM6OrrJD5B7eHjg4MGDOHDgAGxsbNC5c2fY2tri448/RlRUFMzNzdG3b1/8+uuv2LlzJ2JjY2FsbNyksmpSqVTo3bs3Jk2ahMjISDg4OCA2Nhb5+flYvHix3jQlJSUYNmwYJk+eDDc3N1RUVCA2Nhbt2rWDt7c3ACA8PBw+Pj4YNmwY3n//fTg4OODOnTs4fPgwBgwYgIkTJzaqnu7u7ujYsSMWLlyIqqoqlJaWYtmyZaKZNABh67K1a9di7NixaNOmjd7NrZvSbi0/Pz8kJCSIwl577TUEBwdj7ty5KC8vh1KpxLp161BRUaE3j5ycHJw4cQJEhNu3byMqKgoSiQQzZswQxfvzzz8hk8ng5eVVXxexl0lrr/5hz6aCggKaNGkSubi4kJmZGdnb29P48ePpypUrQhx9K0KPHDlCXl5eZGpqSh4eHvTLL7+QUqmkuXPn1plOrVYTANqyZYsQtnXrVnJxcSFzc3Pq27cvZWZm6qwubeiq1PPnz9OAAQPIyspKVE5VVRWtWLGCnJycSCqVkqurq85b2Q3Rt43b6dOnCQClpaUJYcXFxTR16lSSy+VkZmZG/fr1o0OHDhnMq7y8nN577z1yc3MjCwsLksvlNHToUMrMzBSluXLlCr399tukUCjIzMyMlEolTZkyRbTatzbtKszExESdc5mZmeTj40Pm5ubk6upK8fHxevt3+fLl1LFjRzIyMiJnZ2ci0r9atSHt1ufUqVMEQPS3RvTkb+Tdd98lS0tLUigUFBoaSlFRUfWuSrW1taXAwEA6fPiwTllBQUE0efLkeuvEXi4SIqJWGpPZSyAnJwfu7u744YcfEBIS0trVYc+J119/HWPGjMHSpUtbrAy1Wo1XXnkFBw4cwBtvvNFi5bDnDw+MrFktWrQIPXr0gKOjI65fv46VK1eirKwMly5d0rtghzF99u3bh5kzZyI3N1dnhWpzWbFiBQ4dOoSDBw+2SP7s+cX3GFmzqqiowIIFC1BQUAALCwsEBAQgKiqKB0XWKGPGjEFOTg5u3bqFV199tUXKkMvlWLNmTYvkzZ5vPGNkjDHGauDHNRhjjLEaeGBkjDHGauCBkTHGGKuBB0bGGGOsBh4YGWOMsRp4YGSMMcZq4IGRMcYYq4EHRsYYY6yG/wEjo22mSIff+gAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "variations_table_plot = {k:variations_table[k] for k in variations_table if k in ['time stretching', 'pitch shifting', 'noise']}\n", + "fig = benchmark.plot_variations(variations_table_plot, show_diff = True, figsize=(4.6, 4.2));\n", + "# fig.savefig(f'example_{dataset_name}_context.pdf', bbox_inches='tight')" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "name": "SUPERB - IC Task (FSC).ipynb", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/speech/getting_started.ipynb b/examples/speech/getting_started.ipynb index 5af5944..ddedf50 100644 --- a/examples/speech/getting_started.ipynb +++ b/examples/speech/getting_started.ipynb @@ -26,7 +26,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "/home/moscato/miniconda3/envs/speechxai-ferret-integration-2/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + "/home/moscato/miniconda3/envs/ferret-testing-2/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", " from .autonotebook import tqdm as notebook_tqdm\n", "torchvision is not available - cannot save figures\n" ] @@ -101,6 +101,7 @@ } ], "source": [ + "# Note: set the ordinal of the device according to your system.\n", "device_str = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n", "device = torch.device(device_str)\n", "\n", @@ -170,7 +171,7 @@ " " ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -210,11 +211,18 @@ "execution_count": 8, "metadata": {}, "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Transcribing audio to get word level timestamps...\n" + ] + }, { "name": "stderr", "output_type": "stream", "text": [ - "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.0.post0. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../../.cache/torch/whisperx-vad-segmentation.bin`\n" + "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.1. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../../.cache/torch/whisperx-vad-segmentation.bin`\n" ] }, { @@ -222,103 +230,104 @@ "output_type": "stream", "text": [ "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", - "Model was trained with torch 1.10.0+cu102, yours is 2.2.0+cu121. Bad things might happen unless you revert torch to 1.x.\n" + "Model was trained with torch 1.10.0+cu102, yours is 2.2.1+cu121. Bad things might happen unless you revert torch to 1.x.\n", + "Transcribed audio with whisperX into: Turn up the bedroom heat.\n" ] }, { "data": { "text/html": [ "\n", - "\n", + "
\n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", "
 Turnupthebedroomheat.Turnupthebedroomheat.
action=increase0.2510.5450.2430.1300.021action=increase0.2510.5450.2430.1300.021
object=heat-0.000-0.000-0.0000.0140.412object=heat-0.000-0.000-0.0000.0140.412
location=bedroom0.0020.0060.0820.9970.242location=bedroom0.0020.0060.0820.9970.242
\n" ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -327,7 +336,7 @@ ], "source": [ "explanation = benchmark.explain(\n", - " audio_path=audio_path, \n", + " audio_path_or_array=audio_path, \n", " methodology='LOO'\n", ")\n", "\n", @@ -339,11 +348,18 @@ "execution_count": 9, "metadata": {}, "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Transcribing audio to get word level timestamps...\n" + ] + }, { "name": "stderr", "output_type": "stream", "text": [ - "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.0.post0. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../../.cache/torch/whisperx-vad-segmentation.bin`\n" + "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.1. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../../.cache/torch/whisperx-vad-segmentation.bin`\n" ] }, { @@ -351,107 +367,108 @@ "output_type": "stream", "text": [ "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", - "Model was trained with torch 1.10.0+cu102, yours is 2.2.0+cu121. Bad things might happen unless you revert torch to 1.x.\n" + "Model was trained with torch 1.10.0+cu102, yours is 2.2.1+cu121. Bad things might happen unless you revert torch to 1.x.\n", + "Transcribed audio with whisperX into: Turn up the bedroom heat.\n" ] }, { "data": { "text/html": [ "\n", - "\n", + "
\n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", "
 Turnupthebedroomheat.Turnupthebedroomheat.
action=increase0.1550.2730.1170.2810.149action=increase0.1550.2730.1170.2810.149
object=heat0.0550.0150.065-0.0070.211object=heat0.0550.0150.065-0.0070.211
location=bedroom-0.065-0.0050.2530.7070.036location=bedroom-0.065-0.0050.2530.7070.036
\n" ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -460,7 +477,7 @@ ], "source": [ "explanation = benchmark.explain(\n", - " audio_path=audio_path, \n", + " audio_path_or_array=audio_path, \n", " methodology='LIME'\n", ")\n", "\n", @@ -472,36 +489,6 @@ "execution_count": 10, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.0.post0. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../../.cache/torch/whisperx-vad-segmentation.bin`\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", - "Model was trained with torch 1.10.0+cu102, yours is 2.2.0+cu121. Bad things might happen unless you revert torch to 1.x.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.0.post0. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../../.cache/torch/whisperx-vad-segmentation.bin`\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", - "Model was trained with torch 1.10.0+cu102, yours is 2.2.0+cu121. 
Bad things might happen unless you revert torch to 1.x.\n" - ] - }, { "data": { "text/plain": [ @@ -528,318 +515,155 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Working with transcriptions explicitly" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`Ferret` offers an interface with ASR (automatic speech recognition) models from [`WhisperX`](https://github.com/m-bain/whisperX) in the form of the `transcribe_audio` function. This is called from within `Ferret` and there's no need to access it explicitly. Nevertheless, should the need arise, here's how to generate the word-level transcript (with time alignments for the audio part) used internally by the `SpeechBenchmark.evaluate` method." + "## Explain paralinguistic impact" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, - "outputs": [], - "source": [ - "from ferret.explainers.explanation_speech.utils_removal import transcribe_audio" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.0.post0. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../../.cache/torch/whisperx-vad-segmentation.bin`\n" + "Perturbation type: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 8/8 [00:39<00:00, 4.98s/it]\n" ] }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", - "Model was trained with torch 1.10.0+cu102, yours is 2.2.0+cu121. 
Bad things might happen unless you revert torch to 1.x.\n" - ] - } - ], - "source": [ - "text, words_trascript = transcribe_audio(\n", - " audio_path=audio_path,\n", - " device=device.type,\n", - " batch_size=2,\n", - " compute_type=\"float32\",\n", - " language='en'\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ { "data": { "text/html": [ "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
 Turnupthebedroomheat.
action=increase0.2510.5450.2430.1300.021
object=heat-0.000-0.000-0.0000.0140.412
location=bedroom0.0020.0060.0820.9970.242
\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "explanation = benchmark.explain(\n", - " audio_path=audio_path, \n", - " methodology='LOO',\n", - " # Transcripts are passed explicitly.\n", - " words_trascript=words_trascript\n", - ")\n", - "\n", - "display(benchmark.show_table(explanation, decimals=3))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Explain paralinguistic impact" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", + "
\n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", "
 pitch shiftingpitch shifting downpitch shifting uptime stretchingtime stretching downtime stretching upreverberationnoisepitch shiftingpitch shifting downpitch shifting uptime stretchingtime stretching downtime stretching upreverberationnoise
action=increase0.080.040.130.110.190.040.750.44action=increase0.080.040.130.110.190.040.610.44
object=heat0.02-0.000.040.000.000.000.000.29object=heat0.02-0.000.040.000.000.000.000.29
location=bedroom0.220.130.330.020.030.010.420.60location=bedroom0.220.130.330.020.030.010.390.60
\n" ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -848,7 +672,7 @@ ], "source": [ "explain_table = benchmark.explain(\n", - " audio_path=audio_path,\n", + " audio_path_or_array=audio_path,\n", " methodology='perturb_paraling',\n", ")\n", "display(benchmark.show_table(explain_table, decimals=2))" @@ -863,20 +687,21 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 12, "metadata": {}, "outputs": [], "source": [ "perturbation_types = ['time stretching', 'pitch shifting', 'reverberation', 'noise']\n", "variations_table = benchmark.explain_variations(\n", - " audio_path=audio_path,\n", + " audio_path_or_array=audio_path,\n", + " current_sr=16e3,\n", " perturbation_types=perturbation_types\n", ")" ] }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 13, "metadata": {}, "outputs": [ { @@ -918,7 +743,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.13" + "version": "3.10.14" } }, "nbformat": 4, diff --git a/ferret/__init__.py b/ferret/__init__.py index 2fd0143..3fc607b 100644 --- a/ferret/__init__.py +++ b/ferret/__init__.py @@ -4,9 +4,16 @@ __email__ = "giuseppeattanasio6@gmail.com" __version__ = "0.5.0" -from logging import getLogger +import logging -logger = getLogger(__name__) +# create logger +logger = logging.getLogger(__name__) +logger.setLevel(logging.DEBUG) +ch = logging.StreamHandler() +ch.setLevel(logging.DEBUG) +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +ch.setFormatter(formatter) +logger.addHandler(ch) from .benchmark import Benchmark @@ -37,6 +44,18 @@ from .modeling.text_helpers import TokenClassificationHelper +# Check for manual installation of `WhisperX`. +try: + import whisperx +except ImportError as e: + logging.error( + 'Library whisperx not found. 
Please install it manually from GitHub: ' + '`pip install git+https://github.com/m-bain/whisperx.git`' + ) + + raise e + + # Conditional imports for speech-related tasks try: # Explainers @@ -57,7 +76,12 @@ AOPC_Comprehensiveness_Evaluation_Speech, AOPC_Sufficiency_Evaluation_Speech, ) -except ImportError: - logger.info( - "Speech-related modules could not be imported. It is very likely that ferret was installed in the standard, text-only mode. Run `pip install ferret-xai[speech]` or `pip install ferret-xai[all] to include them." +except ImportError as e: + logger.error( + 'Speech-related modules could not be imported. It is very likely that' + ' ferret was installed in the standard, text-only mode. Run ' + '`pip install ferret-xai[speech]` or `pip install ferret-xai[all]` to' + ' include them' ) + + raise e diff --git a/ferret/benchmark_speech.py b/ferret/benchmark_speech.py index 9110c50..fc4ce5c 100644 --- a/ferret/benchmark_speech.py +++ b/ferret/benchmark_speech.py @@ -1,16 +1,18 @@ import numpy as np import pandas as pd -from typing import Dict, List, Union, Tuple -from pydub import AudioSegment +from typing import List, Union, Tuple, Optional import torch import seaborn as sns -from IPython.display import display from .explainers.explanation_speech.loo_speech_explainer import LOOSpeechExplainer -from .explainers.explanation_speech.gradient_speech_explainer import GradientSpeechExplainer +from .explainers.explanation_speech.gradient_speech_explainer import ( + GradientSpeechExplainer, +) from .explainers.explanation_speech.lime_speech_explainer import LIMESpeechExplainer -from .explainers.explanation_speech.paraling_speech_explainer import ParalinguisticSpeechExplainer -from .explainers.explanation_speech.explanation_speech import ExplanationSpeech -from .speechxai_utils import pydub_to_np, print_log +from .explainers.explanation_speech.paraling_speech_explainer import ( + ParalinguisticSpeechExplainer, +) +from .speechxai_utils import FerretAudio, 
transcribe_audio +from tqdm.autonotebook import tqdm SCORES_PALETTE = sns.diverging_palette(240, 10, as_cmap=True) @@ -30,14 +32,14 @@ def __init__( self, model, feature_extractor, - device: str = "cuda:0", + device: str = "cpu", language: str = "en", explainers=None, ): self.model = model self.feature_extractor = feature_extractor self.model.eval() - self.device = torch.device(device if torch.cuda.is_available() else "cpu") + self.device = torch.device(device) self.language = language if "superb-ic" in self.model.name_or_path: @@ -48,7 +50,9 @@ def __init__( self.model, self.feature_extractor, self.device, "en" ) elif "ITALIC" in self.model.name_or_path: - from .modeling.speech_model_helpers.model_helper_italic import ModelHelperITALIC + from .modeling.speech_model_helpers.model_helper_italic import ( + ModelHelperITALIC, + ) self.model_helper = ModelHelperITALIC( self.model, self.feature_extractor, self.device, "it" @@ -86,9 +90,51 @@ def predict( # Just a wrapper around ModelHelperFSC.predict/ModelHelperFSC.predict_single We use the second to overcome the padding issue return self.model_helper.predict(audios) + def _transcribe(self, **transcription_args): + transcription_output = transcribe_audio(**transcription_args) + return transcription_output + + def transcribe( + self, + audio_path_or_array: Union[str, np.ndarray], + current_sr: Optional[int] = None, + batch_size: Optional[int] = 1, + compute_type: Optional[str] = "float32", + model_name_whisper: Optional[str] = "large-v2", + ): + """ + Transcribe the audio and return the transcription. + + Args: + audio_path_or_array: path to the audio file or numpy array with the audio data. 
+ language: language of the audio + current_sr: current sample rate of the audio + batch_size: batch size for the transcription + compute_type: the type of the input data for the model + model_name_whisper: the name of the model to use for the transcription + + Returns: + (text, word_transcripts) + """ + # we do this to introduce sanity checks on the audio + audio = FerretAudio(audio_path_or_array, current_sr=current_sr) + if audio.current_sr != 16_000: + audio.resample(16_000) # this is required by WhisperX + + transcription_output = self._transcribe( + audio=audio.normalized_array, + language=self.language, + batch_size=batch_size, + compute_type=compute_type, + model_name_whisper=model_name_whisper, + device=self.device, + ) + return transcription_output + def explain( self, - audio_path: str, + audio_path_or_array: Union[str, np.ndarray], + current_sr: int = None, target_class: str = None, methodology: str = "LOO", perturbation_types: List[str] = [ @@ -104,7 +150,7 @@ def explain( removal_type: str = "silence", # Used only for LOO and LIME - explainer_args TODO aggregation: str = "mean", # Used only for Gradient and GradientXInput - explainer_args TODO num_samples: int = 1000, # Used only for LIME - explainer_args TODO - words_trascript: List = None, + word_timestamps: List = None, verbose: bool = False, verbose_target: int = 0, ): @@ -112,16 +158,19 @@ def explain( Explain the prediction of the model. Returns the importance of each segment in the audio. """ - explainer_args = {} + explainer_args = dict() # TODO UNIFY THE INPUT FORMAT + # 1. 
Run sanity checks + ferret_audio = FerretAudio(audio_path_or_array, current_sr=current_sr) + ## Get the importance of each class (action, object, location) according to the perturb_paraling type if methodology == "perturb_paraling": explanations = [] explainer = self.explainers["perturb_paraling"] - for perturbation_type in perturbation_types: + for perturbation_type in tqdm(perturbation_types, desc="Perturbation type"): explanation = explainer.compute_explanation( - audio_path=audio_path, + audio=ferret_audio, target_class=target_class, perturbation_type=perturbation_type, verbose=verbose, @@ -134,12 +183,22 @@ def explain( # elif: else: + if methodology not in self.explainers: raise ValueError( - f'Explainer {methodology} not supported. Choose between ' + f"Explainer {methodology} not supported. Choose between " '"LOO", "Gradient", "GradientXInput", "LIME", ' '"perturb_paraling"' ) + + # 2. We will need word level transcripts, let's force generate them if not provided + if word_timestamps is None: + print("Transcribing audio to get word level timestamps...") + text, word_timestamps = self.transcribe( + audio_path_or_array=audio_path_or_array, current_sr=current_sr + ) + print(f"Transcribed audio with whisperX into: {text}") + if "LOO" in methodology: explainer_args["removal_type"] = removal_type elif "LIME" in methodology: @@ -151,9 +210,9 @@ def explain( explainer = self.explainers[methodology] explanation = explainer.compute_explanation( - audio_path=audio_path, + audio=ferret_audio, target_class=target_class, - words_trascript=words_trascript, + word_timestamps=word_timestamps, **explainer_args, ) explanations = explanation @@ -185,9 +244,7 @@ def create_table( if explanations[i].target != explanations[i + 1].target ] == [], "The explanations must have the same target class" assert [ - True - for explanation in explanations - if len(explanation.features) > 1 + True for explanation in explanations if len(explanation.features) > 1 ] == [], "The explanation 
feature should only be one" importance_df = pd.DataFrame( [explanation.scores for explanation in explanations] @@ -240,10 +297,23 @@ def show_table(self, explanations, apply_style: bool = True, decimals=4): else table.apply(pd.to_numeric).style.format(precision=decimals) ) - def explain_variations(self, audio_path, perturbation_types, target_class=None): - perturbation_df_by_type = self.explainers[ - "perturb_paraling" - ].explain_variations(audio_path, perturbation_types, target_class) + def explain_variations( + self, + audio_path_or_array, + current_sr: int, + perturbation_types: List[int], + target_class=None, + ): + """ + Explain the variations of the audio. + Returns the importance of each perturbation. + """ + audio = FerretAudio( + audio_path_or_array=audio_path_or_array, current_sr=current_sr + ) + perturbation_df_by_type = self.explainers["perturb_paraling"].explain_variations( + audio, perturbation_types, target_class + ) return perturbation_df_by_type def plot_variations(self, perturbation_df_by_type, show_diff=False, figsize=(5, 5)): @@ -327,9 +397,7 @@ def plot_variations(self, perturbation_df_by_type, show_diff=False, figsize=(5, ax.set_xlabel( "signal-to-noise ratio (dB)", fontsize=label_size, labelpad=-2 ) - ax.set_xticks( - np.arange(len(x_labels)), labels=x_labels, fontsize=label_size - ) + ax.set_xticks(np.arange(len(x_labels)), labels=x_labels, fontsize=label_size) ax.set_title(perturbation_type, fontsize=label_size) @@ -348,4 +416,4 @@ def plot_variations(self, perturbation_df_by_type, show_diff=False, figsize=(5, cbar.ax.tick_params(labelsize=label_size) plt.show() - return fig \ No newline at end of file + return fig diff --git a/ferret/evaluators/faithfulness_measures_speech.py b/ferret/evaluators/faithfulness_measures_speech.py index f8b8107..db1d61e 100644 --- a/ferret/evaluators/faithfulness_measures_speech.py +++ b/ferret/evaluators/faithfulness_measures_speech.py @@ -10,7 +10,9 @@ ) from ..evaluators.faithfulness_measures import 
_compute_aopc from ..explainers.explanation_speech.explanation_speech import ( - ExplanationSpeech, EvaluationSpeech) + ExplanationSpeech, + EvaluationSpeech, +) from ..explainers.explanation_speech.utils_removal import remove_specified_words from ..speechxai_utils import pydub_to_np @@ -25,7 +27,7 @@ def compute_evaluation( self, explanation: ExplanationSpeech, target=None, - words_trascript: List = None, + # word_timestamps: List = None, **evaluation_args, ) -> EvaluationSpeech: """Evaluate an explanation on the AOPC Comprehensiveness metric. @@ -57,13 +59,10 @@ def compute_evaluation( 'The "target" argument is deprecated and will be removed in a future version. The explanation target are used as default.' ) - audio_path = explanation.audio_path - target = explanation.target - # Get the audio from audio_path - audio = AudioSegment.from_wav(audio_path) - audio_np = pydub_to_np(audio)[0] + # Get audio as array. + audio_np = explanation.audio.normalized_array # Get prediction probability of the input sencence for the target ground_truth_probs = self.model_helper.predict([audio_np]) @@ -78,17 +77,12 @@ def compute_evaluation( # Single probability ground_truth_probs_target = [ground_truth_probs[0][target[0]]] - # Splite the audio into word-level audio segments - from ..explainers.explanation_speech.loo_speech_explainer import transcribe_audio + # TODO: modify to accept a `FerretAudio` object as input. 
+ # Split the audio into word-level audio segments + from ..speechxai_utils import transcribe_audio - if words_trascript is None: - text, words_trascript = transcribe_audio( - audio_path=audio_path, - device=self.model_helper.device.type, - batch_size=2, - compute_type="float32", - language=self.model_helper.language, - ) + # if word_timestamps is None: + word_timestamps = explanation.word_timestamps get_discrete_rationale_function = ( _check_and_define_get_id_discrete_rationale_function( @@ -148,10 +142,12 @@ def compute_evaluation( # For the comprehensiveness: we remove the terms in the discrete rationale. - words_removed = [words_trascript[i] for i in id_top] + words_removed = [word_timestamps[i] for i in id_top] audio_removed = remove_specified_words( - audio, words_removed, removal_type=removal_type + explanation.audio.to_pydub(), + words_removed, + removal_type=removal_type, ) audio_removed_np = pydub_to_np(audio_removed)[0] @@ -202,7 +198,7 @@ def compute_evaluation( self, explanation: ExplanationSpeech, target: List = None, - words_trascript: List = None, + # words_trascript: List = None, **evaluation_args, ) -> EvaluationSpeech: """Evaluate an explanation on the AOPC Sufficiency metric. @@ -234,13 +230,10 @@ def compute_evaluation( 'The "target" argument is deprecated and will be removed in a future version. The explanation target are used as default.' ) - audio_path = explanation.audio_path - target = explanation.target - # Get the audio from audio_path - audio = AudioSegment.from_wav(audio_path) - audio_np = pydub_to_np(audio)[0] + # Get audio as an array. 
+ audio_np = explanation.audio.normalized_array # Get prediction probability of the input sencence for the target ground_truth_probs = self.model_helper.predict([audio_np]) @@ -255,17 +248,9 @@ def compute_evaluation( # Single probability ground_truth_probs_target = [ground_truth_probs[0][target[0]]] - # Splite the audio into word-level audio segments - from ..explainers.explanation_speech.loo_speech_explainer import transcribe_audio - - if words_trascript is None: - text, words_trascript = transcribe_audio( - audio_path=audio_path, - device=self.model_helper.device.type, - batch_size=2, - compute_type="float32", - language=self.model_helper.language, - ) + # Split the audio into word-level audio segments + # if words_trascript is None: + words_trascript = explanation.word_timestamps get_discrete_rationale_function = ( _check_and_define_get_id_discrete_rationale_function( @@ -332,7 +317,9 @@ def compute_evaluation( ] audio_removed = remove_specified_words( - audio, words_removed, removal_type=removal_type + explanation.audio.to_pydub(), + words_removed, + removal_type=removal_type, ) audio_removed_np = pydub_to_np(audio_removed)[0] diff --git a/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py b/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py index 6cf8c4f..84b2ea4 100644 --- a/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py +++ b/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py @@ -4,9 +4,7 @@ import numpy as np import torch from ..explanation_speech import ExplanationSpeech -from ....speechxai_utils import pydub_to_np -# TODO - include in utils -from ..loo_speech_explainer import transcribe_audio +from ....speechxai_utils import FerretAudio class GradientEqualWidthSpeechExplainer: @@ -58,7 +56,7 @@ def _get_gradient_importance_frame_level( def compute_explanation( self, - audio_path: str, + audio: FerretAudio, target_class=None, 
aggregation: str = "mean", num_s_split: float = 0.25, @@ -66,7 +64,7 @@ def compute_explanation( """ Compute the word-level explanation for the given audio. Args: - audio_path: path to the audio file + audio: An instance of the FerretAudio class containing the input audio data. target_class: target class - int - If None, use the predicted class no_before_span: if True, it also consider the span before the word. This is because we observe gradient give importance also for the frame just before the word aggregation: aggregation method for the frames of the word. Can be "mean" or "max" @@ -78,12 +76,10 @@ def compute_explanation( "Aggregation method not supported, choose between 'mean' and 'max'" ) - # Load audio and convert to np.array - audio_as = AudioSegment.from_wav(audio_path) - audio = pydub_to_np(audio_as)[0] + audio_np = audio.normalized_array # Predict logits/probabilities - logits_original = self.model_helper.predict([audio]) + logits_original = self.model_helper.predict([audio_np]) # Check if single label or multilabel scenario as for FSC n_labels = self.model_helper.n_labels @@ -108,7 +104,7 @@ def compute_explanation( for target_label, target_class in enumerate(targets): # Get gradient importance for each frame attr = self._get_gradient_importance_frame_level( - audio, target_class, target_label + audio_np, target_class, target_label ) old_start = 0 @@ -117,7 +113,9 @@ def compute_explanation( importances = [] a, b = 0, 0 # 50, 20 - duration_s = len(audio_as) / 1000 + # Note: assuming mono audio here ([duration in s] = [n samples] / + # [sample rate]). 
+ duration_s = len(audio_np) / audio.sample_rate a, b = 0, 0 for e, i in enumerate(np.arange(0, duration_s, num_s_split)): @@ -160,7 +158,7 @@ def compute_explanation( scores=scores, explainer=self.NAME + "-" + aggregation, target=targets if n_labels > 1 else targets, - audio_path=audio_path, + audio=audio, ) return explanation \ No newline at end of file diff --git a/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py b/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py index 6a59a3c..e281ad8 100644 --- a/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py +++ b/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py @@ -2,9 +2,8 @@ from pydub import AudioSegment import numpy as np from ..lime_timeseries import LimeTimeSeriesExplainer -from ..utils_removal import transcribe_audio from ..explanation_speech import ExplanationSpeech -from ....speechxai_utils import pydub_to_np +from ....speechxai_utils import FerretAudio EMPTY_SPAN = "---" @@ -17,7 +16,7 @@ def __init__(self, model_helper): def compute_explanation( self, - audio_path: str, + audio: FerretAudio, target_class=None, removal_type: str = "silence", num_samples: int = 1000, @@ -25,7 +24,7 @@ def compute_explanation( ) -> ExplanationSpeech: """ Compute the word-level explanation for the given audio. - audio_path: path to the audio file + audio: An instance of the FerretAudio class containing the input audio data. 
target_class: target class - int - If None, use the predicted class removal_type: """ @@ -35,12 +34,10 @@ def compute_explanation( "Removal method not supported, choose between 'silence' and 'noise'" ) - # Load audio and convert to np.array - audio_as = AudioSegment.from_wav(audio_path) - audio = pydub_to_np(audio_as)[0] + audio_np = audio.normalized_array # Predict logits/probabilities - logits_original = self.model_helper.predict([audio]) + logits_original = self.model_helper.predict([audio_np]) # Check if single label or multilabel scenario as for FSC n_labels = self.model_helper.n_labels @@ -59,13 +56,11 @@ def compute_explanation( else: targets = [int(np.argmax(logits_original, axis=1)[0])] - audio_np = audio.reshape(1, -1) - # Get the start and end indexes of the segments. These will be used to split the audio and derive LIME interpretable features sampling_rate = self.model_helper.feature_extractor.sampling_rate splits = [] - duration_s = len(audio_as) / 1000 + duration_s = len(audio_np) / audio.sample_rate a, b = 0, 0 for e, i in enumerate(np.arange(0, duration_s, num_s_split)): @@ -92,7 +87,10 @@ def compute_explanation( predict_proba_function = self.model_helper.predict from copy import deepcopy - input_audio = deepcopy(audio_np) + # WARNING: this is the original reshaping, which assumes that + # `LimeTimeSeriesExplainer` accepts an array with shape + # (1, n_samples). 
+ input_audio = deepcopy(audio_np.reshape(1, -1)) # Explain the instance using the splits as interpretable features exp = lime_explainer.explain_instance( @@ -136,7 +134,7 @@ def compute_explanation( scores=scores, explainer=self.NAME + "+" + removal_type, target=targets if n_labels > 1 else targets, - audio_path=audio_path, + audio=audio, ) return explanation \ No newline at end of file diff --git a/ferret/explainers/explanation_speech/equal_width/loo_equal_width_explainer.py b/ferret/explainers/explanation_speech/equal_width/loo_equal_width_explainer.py index a560a0c..031cb41 100644 --- a/ferret/explainers/explanation_speech/equal_width/loo_equal_width_explainer.py +++ b/ferret/explainers/explanation_speech/equal_width/loo_equal_width_explainer.py @@ -6,7 +6,7 @@ from pydub import AudioSegment from IPython.display import display from ..explanation_speech import ExplanationSpeech -from ....speechxai_utils import pydub_to_np, print_log +from ....speechxai_utils import pydub_to_np, print_log, FerretAudio def remove_audio_segment(audio, start_s, end_s, removal_type: str = "silence"): @@ -41,7 +41,7 @@ def remove_audio_segment(audio, start_s, end_s, removal_type: str = "silence"): # display(audio_removed) elif removal_type == "pink noise": sounds_path = (os.path.join(os.path.dirname(__file__), "pink_noise.mp3"),) - replace_word_audio = AudioSegment.from_mp3(sound_path)[:word_duration] + replace_word_audio = AudioSegment.from_mp3(sounds_path)[:word_duration] audio_removed = before_word_audio + replace_word_audio + after_word_audio return audio_removed @@ -55,7 +55,7 @@ def __init__(self, model_helper): def compute_explanation( self, - audio_path: str, + audio: FerretAudio, target_class=None, removal_type: str = "silence", num_s_split: float = 0.25, @@ -66,19 +66,21 @@ def compute_explanation( """ ## Load audio as pydub.AudioSegment - audio = AudioSegment.from_wav(audio_path) - audio_np = pydub_to_np(audio)[0] + audio_as = audio.to_pydub() + audio_np = 
audio.normalized_array ## Remove word audio_remove_segments = [] - duration_s = len(audio) / 1000 + duration_s = len(audio_as) / 1000 for i in np.arange(0, duration_s, num_s_split): start_s = i end_s = min(i + num_s_split, duration_s) - audio_removed = remove_audio_segment(audio, start_s, end_s, removal_type) + audio_removed = remove_audio_segment(audio_as, start_s, end_s, removal_type) + # Using `pydub_to_np` to avoid converting to a `FerretAudio` + # instance with no real need for it. audio_remove_segments.append(pydub_to_np(audio_removed)[0]) if display_audio: @@ -137,7 +139,7 @@ def compute_explanation( scores=scores, explainer=self.NAME + "+" + removal_type, target=targets if n_labels > 1 else [targets], - audio_path=audio_path, + audio=audio, ) return explanation \ No newline at end of file diff --git a/ferret/explainers/explanation_speech/explanation_speech.py b/ferret/explainers/explanation_speech/explanation_speech.py index 139fe38..5f74309 100644 --- a/ferret/explainers/explanation_speech/explanation_speech.py +++ b/ferret/explainers/explanation_speech/explanation_speech.py @@ -1,6 +1,7 @@ from dataclasses import dataclass import numpy as np -from typing import Optional +from typing import Optional, List, Dict +from ...speechxai_utils import FerretAudio @dataclass @@ -9,7 +10,8 @@ class ExplanationSpeech: scores: np.array explainer: str target: list - audio_path: Optional[str] = None + audio: FerretAudio + word_timestamps: Optional[List[Dict]] = None @dataclass @@ -25,4 +27,4 @@ class EvaluationSpeech: name: str score: list - target: list \ No newline at end of file + target: list diff --git a/ferret/explainers/explanation_speech/gradient_speech_explainer.py b/ferret/explainers/explanation_speech/gradient_speech_explainer.py index a3b8efe..e8ff2b7 100644 --- a/ferret/explainers/explanation_speech/gradient_speech_explainer.py +++ b/ferret/explainers/explanation_speech/gradient_speech_explainer.py @@ -4,9 +4,9 @@ import numpy as np import torch from 
.explanation_speech import ExplanationSpeech -from ...speechxai_utils import pydub_to_np +from ...speechxai_utils import pydub_to_np, FerretAudio + # TODO - include in utils -from .loo_speech_explainer import transcribe_audio class GradientSpeechExplainer: @@ -58,16 +58,16 @@ def _get_gradient_importance_frame_level( def compute_explanation( self, - audio_path: str, + audio: FerretAudio, + word_timestamps: List, target_class=None, - words_trascript: List = None, no_before_span: bool = True, aggregation: str = "mean", ) -> ExplanationSpeech: """ Compute the word-level explanation for the given audio. Args: - audio_path: path to the audio file + audio: An instance of the FerretAudio class containing the input audio data. target_class: target class - int - If None, use the predicted class no_before_span: if True, it also consider the span before the word. This is because we observe gradient give importance also for the frame just before the word aggregation: aggregation method for the frames of the word. Can be "mean" or "max" @@ -79,10 +79,13 @@ def compute_explanation( ) # Load audio and convert to np.array - audio = pydub_to_np(AudioSegment.from_wav(audio_path))[0] + # Note: we use the normalized array for consistency with the original + # SpeechXAI code (it used to come from the `pydub_to_np` + # function). 
+ audio_array = audio.normalized_array # Predict logits/probabilities - logits_original = self.model_helper.predict([audio]) + logits_original = self.model_helper.predict([audio_array]) # Check if single label or multilabel scenario as for FSC n_labels = self.model_helper.n_labels @@ -101,11 +104,9 @@ def compute_explanation( else: targets = [int(np.argmax(logits_original, axis=1)[0])] - if words_trascript is None: - # Transcribe audio - _, words_trascript = transcribe_audio( - audio_path=audio_path, language=self.model_helper.language - ) + # if word_timestamps is None: + # # Transcribe audio + word_timestamps = audio.transcription # Compute gradient importance for each target label # This also handles the multilabel scenario as for FSC @@ -113,7 +114,7 @@ def compute_explanation( for target_label, target_class in enumerate(targets): # Get gradient importance for each frame attr = self._get_gradient_importance_frame_level( - audio, target_class, target_label + audio_array, target_class, target_label ) old_start = 0 @@ -122,7 +123,7 @@ def compute_explanation( importances = [] a, b = 0, 0 # 50, 20 - for word in words_trascript: + for word in word_timestamps: if no_before_span: # We directly consider the transcribed word start_ms = (word["start"] * 1000 - a) / 1000 @@ -174,14 +175,15 @@ def compute_explanation( else: scores = np.array([importances]) - features = [word["word"] for word in words_trascript] + features = [word["word"] for word in word_timestamps] explanation = ExplanationSpeech( features=features, scores=scores, explainer=self.NAME + "-" + aggregation, target=targets if n_labels > 1 else targets, - audio_path=audio_path, + audio=audio, + word_timestamps=word_timestamps, ) - return explanation \ No newline at end of file + return explanation diff --git a/ferret/explainers/explanation_speech/lime_speech_explainer.py b/ferret/explainers/explanation_speech/lime_speech_explainer.py index 18fe41c..8ed37f4 100644 --- 
a/ferret/explainers/explanation_speech/lime_speech_explainer.py +++ b/ferret/explainers/explanation_speech/lime_speech_explainer.py @@ -2,9 +2,8 @@ from pydub import AudioSegment import numpy as np from .lime_timeseries import LimeTimeSeriesExplainer -from .utils_removal import transcribe_audio from .explanation_speech import ExplanationSpeech -from ...speechxai_utils import pydub_to_np +from ...speechxai_utils import FerretAudio EMPTY_SPAN = "---" @@ -17,16 +16,16 @@ def __init__(self, model_helper): def compute_explanation( self, - audio_path: str, + audio: FerretAudio, + word_timestamps: List, target_class=None, - words_trascript: List = None, removal_type: str = "silence", num_samples: int = 1000, ) -> ExplanationSpeech: """ Compute the word-level explanation for the given audio. Args: - audio_path: path to the audio file + audio: An instance of the FerretAudio class containing the input audio data. target_class: target class - int - If None, use the predicted class removal_type: """ @@ -36,11 +35,13 @@ def compute_explanation( "Removal method not supported, choose between 'silence' and 'noise'" ) - # Load audio and convert to np.array - audio = pydub_to_np(AudioSegment.from_wav(audio_path))[0] + # Note: we use the normalized array for consistency with the original + # SpeechXAI code (it used to come from the `pydub_to_np` + # function). + audio_array = audio.normalized_array # Predict logits/probabilities - logits_original = self.model_helper.predict([audio]) + logits_original = self.model_helper.predict([audio_array]) # Check if single label or multilabel scenario as for FSC n_labels = self.model_helper.n_labels @@ -59,20 +60,13 @@ def compute_explanation( else: targets = [int(np.argmax(logits_original, axis=1)[0])] - if words_trascript is None: - # Transcribe audio - _, words_trascript = transcribe_audio( - audio_path=audio_path, language=self.model_helper.language - ) - audio_np = audio.reshape(1, -1) - # Get the start and end indexes of the words. 
These will be used to split the audio and derive LIME interpretable features - tot_len = audio.shape[0] + tot_len = audio_array.shape[0] sampling_rate = self.model_helper.feature_extractor.sampling_rate splits = [] old_start = 0 a, b = 0, 0 - for word in words_trascript: + for word in word_timestamps: start, end = int((word["start"] + a) * sampling_rate), int( (word["end"] + b) * sampling_rate ) @@ -96,7 +90,7 @@ def compute_explanation( predict_proba_function = self.model_helper.predict from copy import deepcopy - input_audio = deepcopy(audio_np) + input_audio = deepcopy(audio_array.reshape(1, -1)) # Explain the instance using the splits as interpretable features exp = lime_explainer.explain_instance( @@ -113,9 +107,7 @@ def compute_explanation( map_scores = {k: v for k, v in exp.as_map()[target_class]} map_scores = { k: v - for k, v in sorted( - map_scores.items(), key=lambda x: x[0], reverse=False - ) + for k, v in sorted(map_scores.items(), key=lambda x: x[0], reverse=False) } # Remove the 'empty' spans, the spans between words @@ -143,7 +135,8 @@ def compute_explanation( scores=scores, explainer=self.NAME + "+" + removal_type, target=targets if n_labels > 1 else targets, - audio_path=audio_path, + audio=audio, + word_timestamps=word_timestamps, ) - return explanation \ No newline at end of file + return explanation diff --git a/ferret/explainers/explanation_speech/loo_speech_explainer.py b/ferret/explainers/explanation_speech/loo_speech_explainer.py index 31a6d7f..5588d8e 100644 --- a/ferret/explainers/explanation_speech/loo_speech_explainer.py +++ b/ferret/explainers/explanation_speech/loo_speech_explainer.py @@ -1,11 +1,15 @@ """LOO Speech Explainer module""" + import numpy as np from typing import Dict, List, Union, Tuple from pydub import AudioSegment from IPython.display import display from .explanation_speech import ExplanationSpeech -from .utils_removal import transcribe_audio, remove_word -from ...speechxai_utils import pydub_to_np, print_log +from 
.utils_removal import remove_word +from ...speechxai_utils import pydub_to_np, FerretAudio +from logging import getLogger + +logger = getLogger(__name__) class LOOSpeechExplainer: @@ -16,9 +20,9 @@ def __init__(self, model_helper): def remove_words( self, - audio_path: str, + audio: FerretAudio, + word_timestamps: List, removal_type: str = "nothing", - words_trascript: List = None, display_audio: bool = False, ) -> Tuple[List[AudioSegment], List[Dict[str, Union[str, float]]]]: """ @@ -29,40 +33,31 @@ def remove_words( - pink noise """ - ## Transcribe audio - - if words_trascript is None: - text, words_trascript = transcribe_audio( - audio_path=audio_path, - device=self.model_helper.device.type, - batch_size=2, - compute_type="float32", - language=self.model_helper.language, - ) - ## Load audio as pydub.AudioSegment - audio = AudioSegment.from_wav(audio_path) + pydub_segment = audio.to_pydub() ## Remove word - audio_no_words = [] + audio_no_words = list() - for word in words_trascript: - audio_removed = remove_word(audio, word, removal_type) + for word in word_timestamps: + audio_removed = remove_word(pydub_segment, word, removal_type) + # Note: we might potentially put `audio_removed` into a + # `FerretAudio` object, but it'd be an additional step. audio_no_words.append(pydub_to_np(audio_removed)[0]) if display_audio: - print_log(word["word"]) + print(word["word"]) display(audio_removed) - return audio_no_words, words_trascript + return audio_no_words, word_timestamps def compute_explanation( self, - audio_path: str, + audio: FerretAudio, target_class=None, removal_type: str = None, - words_trascript: List = None, + word_timestamps: List = None, ) -> ExplanationSpeech: """ Computes the importance of each word in the audio. 
@@ -70,19 +65,23 @@ def compute_explanation( ## Get modified audio by leaving a single word out and the words modified_audios, words = self.remove_words( - audio_path, removal_type, words_trascript=words_trascript + audio=audio, word_timestamps=word_timestamps, removal_type=removal_type ) logits_modified = self.model_helper.predict(modified_audios) - audio = pydub_to_np(AudioSegment.from_wav(audio_path))[0] + # Note: we use the normalized array for consistency with the original + # SpeechXAI code (it used to come from the `pydub_to_np` + # function). + audio_array = audio.normalized_array - logits_original = self.model_helper.predict([audio]) + logits_original = self.model_helper.predict([audio_array]) # Check if single label or multilabel scenario as for FSC n_labels = self.model_helper.n_labels # TODO + # TODO GA: what? if target_class is not None: targets = target_class @@ -100,9 +99,7 @@ def compute_explanation( if n_labels > 1: # Multilabel scenario as for FSC modified_trg = [logits_modified[i][:, targets[i]] for i in range(n_labels)] - original_gt = [ - logits_original[i][:, targets[i]][0] for i in range(n_labels) - ] + original_gt = [logits_original[i][:, targets[i]][0] for i in range(n_labels)] else: modified_trg = logits_modified[:, targets] @@ -112,9 +109,7 @@ def compute_explanation( if n_labels > 1: # Multilabel scenario as for FSC - prediction_diff = [ - original_gt[i] - modified_trg[i] for i in range(n_labels) - ] + prediction_diff = [original_gt[i] - modified_trg[i] for i in range(n_labels)] else: prediction_diff = [original_gt - modified_trg] @@ -125,7 +120,8 @@ def compute_explanation( scores=scores, explainer=self.NAME + "+" + removal_type, target=targets if n_labels > 1 else [targets], - audio_path=audio_path, + audio=audio, # TODO GA: I don't know if this is something we want to keep + word_timestamps=word_timestamps, ) return explanation diff --git a/ferret/explainers/explanation_speech/paraling_speech_explainer.py 
b/ferret/explainers/explanation_speech/paraling_speech_explainer.py index 2f1edc1..8469af6 100644 --- a/ferret/explainers/explanation_speech/paraling_speech_explainer.py +++ b/ferret/explainers/explanation_speech/paraling_speech_explainer.py @@ -1,4 +1,5 @@ """Paralinguistic Speech Explainer module""" + import os import numpy as np import pandas as pd @@ -14,7 +15,16 @@ PolarityInversion, ) from .explanation_speech import ExplanationSpeech -from ...speechxai_utils import pydub_to_np, print_log +from ...speechxai_utils import pydub_to_np, FerretAudio +import torchaudio.functional as F +import torch +from audiostretchy.stretch import AudioStretch +import audio_effects +import tempfile +from io import BytesIO +import requests + +from copy import deepcopy # If True, We use the audiostretchy library to perform time stretching @@ -25,6 +35,12 @@ REFERENCE_STR = "-" +ENDPOINTS = { + "WHITE_NOISE": "https://github.com/g8a9/ferret/raw/feat/support-speech-from-array/ferret/explainers/explanation_speech/white_noise.mp3", + "PINK_NOISE": "https://github.com/g8a9/ferret/raw/feat/support-speech-from-array/ferret/explainers/explanation_speech/pink_noise.mp3", +} + + def _tmp_log1( verbose_target, original_gt, @@ -32,31 +48,31 @@ def _tmp_log1( n_labels, ): if n_labels > 1: - print_log("Target label: ", verbose_target) - print_log("gt", original_gt[verbose_target]) - print_log("m", modified_trg[verbose_target]) + print("Target label: ", verbose_target) + print("gt", original_gt[verbose_target]) + print("m", modified_trg[verbose_target]) else: - print_log("gt", original_gt) - print_log("m", modified_trg) + print("gt", original_gt) + print("m", modified_trg) -def _tmp_log2( - verbose_target, - original_gt, - modified_trg, - n_labels, -): - if n_labels > 1: - print_log( - [ - original_gt[verbose_target] - modified_trg[verbose_target][i] - for i in range(modified_trg[verbose_target].shape[0]) - ] - ) +# def _tmp_log2( +# verbose_target, +# original_gt, +# modified_trg, +# n_labels, +# 
): +# if n_labels > 1: +# print_log( +# [ +# original_gt[verbose_target] - modified_trg[verbose_target][i] +# for i in range(modified_trg[verbose_target].shape[0]) +# ] +# ) - else: - print_log([original_gt - modified_trg[i] for i in range(modified_trg.shape[0])]) +# else: +# print_log([original_gt - modified_trg[i] for i in range(modified_trg.shape[0])]) class ParalinguisticSpeechExplainer: @@ -131,7 +147,6 @@ def augmentation( def time_stretching_augmentation( self, audio_as: AudioSegment, perturbation_value: float ): - import audio_effects if perturbation_value < 1: perturbed_audio_as = audio_effects.speed_down(audio_as, perturbation_value) @@ -141,14 +156,19 @@ def time_stretching_augmentation( return perturbed_audio.squeeze() def time_stretching_augmentation_AudioStretch( - self, audio_path: str, perturbation_value: float + self, audio: FerretAudio, perturbation_value: float ): - from audiostretchy.stretch import AudioStretch + pydub_segment = audio.to_pydub() audio_stretch = AudioStretch() - audio_stretch.open(audio_path) - audio_stretch.stretch(ratio=perturbation_value) - perturbated_audio_samples = np.array(audio_stretch.samples, dtype=np.float32) + with tempfile.NamedTemporaryFile(suffix=".wav") as temp_audio: + pydub_segment.export(temp_audio.name, format="wav") + temp_audio.seek(0) + + audio_stretch.open(temp_audio.name) + audio_stretch.stretch(ratio=perturbation_value) + perturbated_audio_samples = np.array(audio_stretch.samples, dtype=np.float32) + return perturbated_audio_samples def pitch_shifting_augmentation( @@ -172,13 +192,11 @@ def add_white_noise_torchaudio(self, original_speech, noise_rate): noise_rate: signal-to-noise ratios in dB """ - import torchaudio.functional as F - from copy import deepcopy - import torch - - WHITE_NOISE = os.path.join(os.path.dirname(__file__), "white_noise.mp3") + # WHITE_NOISE = os.path.join(os.path.dirname(__file__), "white_noise.mp3") + # noise_as = AudioSegment.from_mp3(WHITE_NOISE) - noise_as = 
AudioSegment.from_mp3(WHITE_NOISE) + res = requests.get(ENDPOINTS["WHITE_NOISE"]) + noise_as = AudioSegment.from_file(BytesIO(res.content), "mp3") noise, frame_rate = pydub_to_np(noise_as) # Reshape and convert to torch tensor @@ -205,9 +223,6 @@ def change_pitch_torchaudio(self, original_speech, frame_rate, perturbation_valu perturbation_value: """ - import torchaudio.functional as F - import torch - # Reshape and convert to torch tensor audio_t = torch.tensor(original_speech.reshape(1, -1)) perturbated_audio = F.pitch_shift( @@ -218,12 +233,12 @@ def change_pitch_torchaudio(self, original_speech, frame_rate, perturbation_valu def perturbe_waveform( self, - audio_path: str, + audio: FerretAudio, perturbation_type: str, return_perturbations=False, verbose: bool = False, verbose_target: int = 0, - ): # -> List[np.ndarray]: + ): """ Perturbate audio using pydub, by adding: - pitch shifting @@ -233,8 +248,8 @@ def perturbe_waveform( """ ## Load audio as pydub.AudioSegment - audio_as = AudioSegment.from_wav(audio_path) - audio, frame_rate = pydub_to_np(audio_as) + # audio_as = AudioSegment.from_wav(audio_path) + # audio, frame_rate = pydub_to_np(audio_as) ## Perturbate audio perturbated_audios = [] @@ -306,9 +321,7 @@ def perturbe_waveform( raise ValueError(f"Perturbation '{perturbation_type}' is not available") if verbose: - from IPython.display import Audio - print_log("Original audio") # Display the original audio and show its info for a single class self._tmp_log_show_info( "Original audio", @@ -317,27 +330,36 @@ def perturbe_waveform( verbose_target, ) + pydub_segment = audio.to_pydub() + for perturbation_value in perturbations: if "time stretching" in perturbation_type: if USE_AUDIOSTRETCH: perturbated_audio = self.time_stretching_augmentation_AudioStretch( - audio_path, perturbation_value + audio=audio, perturbation_value=perturbation_value ) else: perturbated_audio = self.time_stretching_augmentation( - audio_as, perturbation_value + pydub_segment, 
perturbation_value ) elif "pitch shifting" in perturbation_type: # perturbated_audio = self.pitch_shifting_augmentation( # audio_as, perturbation_value # ) + + + # Note: here we assume frame rate and sample rate are the + # same, which is always true for single-channel (mono) + # audio. perturbated_audio = self.change_pitch_torchaudio( - audio, frame_rate, perturbation_value + audio.normalized_array, + audio.current_sr, + perturbation_value, ) elif perturbation_type == "noise" and USE_ADD_NOISE_TORCHAUDIO: perturbated_audio = self.add_white_noise_torchaudio( - audio, perturbation_value + audio.normalized_array, perturbation_value ) else: augment = self.augmentation( @@ -345,7 +367,8 @@ def perturbe_waveform( perturbation_type=perturbation_type, ) perturbated_audio = augment( - samples=audio.squeeze(), sample_rate=frame_rate + samples=audio.normalized_array.squeeze(), + sample_rate=audio.current_sr ) if verbose: @@ -365,7 +388,7 @@ def perturbe_waveform( def compute_explanation( self, - audio_path: str, + audio: FerretAudio, target_class=None, perturbation_type: str = None, verbose: bool = False, @@ -376,7 +399,7 @@ def compute_explanation( """ modified_audios = self.perturbe_waveform( - audio_path, + audio, perturbation_type, verbose=verbose, verbose_target=verbose_target, @@ -386,9 +409,10 @@ def compute_explanation( logits_modified = self.model_helper.predict(modified_audios) - audio = pydub_to_np(AudioSegment.from_wav(audio_path))[0] - - logits_original = self.model_helper.predict([audio]) + # Note: we use the normalized array for consistency with the original + # SpeechXAI code (it used to come from the `pydub_to_np` + # function). 
+ logits_original = self.model_helper.predict([audio.normalized_array]) # Check if single label or multilabel scenario as for FSC n_labels = self.model_helper.n_labels @@ -411,9 +435,7 @@ def compute_explanation( if n_labels > 1: # Multilabel scenario as for FSC modified_trg = [logits_modified[i][:, targets[i]] for i in range(n_labels)] - original_gt = [ - logits_original[i][:, targets[i]][0] for i in range(n_labels) - ] + original_gt = [logits_original[i][:, targets[i]][0] for i in range(n_labels)] else: modified_trg = logits_modified[:, targets] @@ -421,8 +443,7 @@ def compute_explanation( if verbose: _tmp_log1(verbose_target, original_gt, modified_trg, n_labels) - - _tmp_log2(verbose_target, original_gt, modified_trg, n_labels) + # _tmp_log2(verbose_target, original_gt, modified_trg, n_labels) ## Compute the difference between the ground truth and the modified audio # prediction_diff = original_gt - np.mean(modified_trg) @@ -444,21 +465,22 @@ def compute_explanation( scores=scores, explainer=self.NAME, target=targets if n_labels > 1 else [targets], - audio_path=audio_path, + audio=audio, ) return explanation - def explain_variations(self, audio_path, perturbation_types, target_class=None): + def explain_variations( + self, audio: FerretAudio, perturbation_types: List[int], target_class=None + ): n_labels = self.model_helper.n_labels - audio = pydub_to_np(AudioSegment.from_wav(audio_path))[0] + audio_array = audio.normalized_array - original_gt = self.model_helper.get_predicted_probs(audio=audio) + original_gt = self.model_helper.get_predicted_probs(audio=audio_array) if target_class is None: - targets = self.model_helper.get_predicted_classes(audio=audio) - + targets = self.model_helper.get_predicted_classes(audio=audio_array) else: targets = target_class @@ -467,7 +489,7 @@ def explain_variations(self, audio_path, perturbation_types, target_class=None): perturbation_df_by_type = {} for perturbation_type in perturbation_types: perturbated_audios, perturbations = 
self.perturbe_waveform( - audio_path, perturbation_type, return_perturbations=True + audio, perturbation_type, return_perturbations=True ) if "time stretching" in perturbation_type: @@ -486,7 +508,6 @@ def explain_variations(self, audio_path, perturbation_types, target_class=None): prob_variations.append( [probs_modified[i][:, targets[i]][0] for i in range(n_labels)] ) - else: prob_variations.append([probs_modified[:, targets][0]]) @@ -529,7 +550,7 @@ def _tmp_log_show_info( # Note that in a single label scenario, verbose_target is ignored (always 0) - print_log(perturbation_type, perturbation_value) + print(perturbation_type, perturbation_value) # Prediction probability predictions = self.model_helper.predict([perturbated_audio]) @@ -540,22 +561,22 @@ def _tmp_log_show_info( preds = self.model_helper.get_text_labels(predicted_labels) if self.model_helper.n_labels > 1: - print_log(f"Target label: {verbose_target}") - print_log( + print(f"Target label: {verbose_target}") + print( f"Predicted probs:", np.round(predictions[verbose_target], 3), ) - print_log( + print( "Predicted class: ", preds[verbose_target], f"id: {predicted_labels[verbose_target]}", ) else: - print_log( + print( f"Predicted probs: ", np.round(predictions[0], 3), ) - print_log( + print( "Predicted class: ", preds, f"id: {predicted_labels[0]}", diff --git a/ferret/explainers/explanation_speech/utils_removal.py b/ferret/explainers/explanation_speech/utils_removal.py index 5cf7126..20ed538 100644 --- a/ferret/explainers/explanation_speech/utils_removal.py +++ b/ferret/explainers/explanation_speech/utils_removal.py @@ -1,7 +1,6 @@ from pydub import AudioSegment -import whisperx import os -from typing import Dict, List, Union, Tuple +import numpy as np def remove_specified_words(audio, words, removal_type: str = "nothing"): @@ -50,106 +49,6 @@ def remove_specified_words(audio, words, removal_type: str = "nothing"): return audio_removed -def transcribe_audio( - audio_path: str, - device: str = "cuda", - 
batch_size: int = 2, - compute_type: str = "float32", - language: str = "en", - model_name_whisper: str = "large-v2", -) -> Tuple[str, List[Dict[str, Union[str, float]]]]: - """ - Transcribe audio using whisperx, - and return the text (transcription) and the words with their start and end times. - """ - - ## Load whisperx model - model_whisperx = whisperx.load_model( - model_name_whisper, - device, - compute_type=compute_type, - language=language, - ) - - ## Transcribe audio - audio = whisperx.load_audio(audio_path) - result = model_whisperx.transcribe(audio, batch_size=batch_size) - model_a, metadata = whisperx.load_align_model( - language_code=result["language"], device=device - ) - - ## Align timestamps - result = whisperx.align( - result["segments"], - model_a, - metadata, - audio, - device, - return_char_alignments=False, - ) - - if result is None or "segments" not in result or len(result["segments"]) == 0: - return "", [] - - if len(result["segments"]) == 1: - text = result["segments"][0]["text"] - words = result["segments"][0]["words"] - else: - text = " ".join( - result["segments"][i]["text"] for i in range(len(result["segments"])) - ) - words = [word for segment in result["segments"] for word in segment["words"]] - - # Remove words that are not properly transcribed - words = [word for word in words if "start" in word] - return text, words - - -def transcribe_audio_given_model( - model_whisperx, - audio_path: str, - batch_size: int = 2, - device: str = "cuda", -) -> Tuple[str, List[Dict[str, Union[str, float]]]]: - """ - Transcribe audio using whisperx, - and return the text (transcription) and the words with their start and end times. 
- """ - - ## Transcribe audio - audio = whisperx.load_audio(audio_path) - result = model_whisperx.transcribe(audio, batch_size=batch_size) - model_a, metadata = whisperx.load_align_model( - language_code=result["language"], device=device - ) - - ## Align timestamps - result = whisperx.align( - result["segments"], - model_a, - metadata, - audio, - device, - return_char_alignments=False, - ) - - if result is None or "segments" not in result or len(result["segments"]) == 0: - return "", [] - - if len(result["segments"]) == 1: - text = result["segments"][0]["text"] - words = result["segments"][0]["words"] - else: - text = " ".join( - result["segments"][i]["text"] for i in range(len(result["segments"])) - ) - words = [word for segment in result["segments"] for word in segment["words"]] - - # Remove words that are not properly transcribed - words = [word for word in words if "start" in word] - return text, words - - def remove_word(audio, word, removal_type: str = "nothing"): """ Remove a word from audio using pydub, by replacing it with: diff --git a/ferret/speechxai_utils.py b/ferret/speechxai_utils.py index 0c28560..827dc30 100644 --- a/ferret/speechxai_utils.py +++ b/ferret/speechxai_utils.py @@ -7,6 +7,220 @@ import torch from datasets import Dataset from transformers import Wav2Vec2ForSequenceClassification, Wav2Vec2FeatureExtractor +import librosa +import whisperx +from typing import Dict, List, Union, Tuple, Optional + + +class FerretAudio: + """ + Internal class to handle audio data. We force signal to be mono and of type np.float32 (i.e., 4 bytes to represent each sample). 
+ """ + + def __init__( + self, + audio_path_or_array: Union[str, np.ndarray], + current_sr: Optional[int] = None, + ): + self.audio_path_or_array = audio_path_or_array + self.current_sr = current_sr + self._transcription = None + + if isinstance(audio_path_or_array, str): + self.array, self.current_sr = librosa.load( + audio_path_or_array, sr=None, dtype=np.float32 + ) + elif isinstance(audio_path_or_array, np.ndarray): + if current_sr is None: + raise ValueError( + "If audio is provided as a numpy array, the native sampling rate (native_sr arg) must be provided" + ) + self.array = audio_path_or_array + else: + raise ValueError( + "audio_path_or_array must be a string (path to audio file) or a numpy array" + ) + + # check dimensions and channels + if self.array.ndim > 2 or (self.array.ndim == 2 and self.array.shape[1] != 1): + raise ValueError( + "Audio must be mono in either the format (n_samples,) or (n_samples, 1)" + ) + + # reshape to (n_samples, 1) if needed + # TODO: is this needed? + self.array = self.array.reshape(-1, 1) + + @property + def _is_normalized(self) -> bool: + """Check if the array is already normalized.""" + return np.max(np.abs(self.array)) <= 1.0 + + @property + def normalized_array(self) -> np.ndarray: + return self.array / 32768.0 if not self._is_normalized else self.array + + def resample(self, target_sr: int): + """ + Resample the audio to the target sampling rate. In place operation. + """ + self.array = librosa.resample( + self.array, orig_sr=self.current_sr, target_sr=target_sr + ) + self.current_sr = target_sr + + @staticmethod + def unnormalize_array(arr, dtype=np.int16): + """ + Given a NumPy array normalized in `[-1, 1]`, returns an array rescaled + in `[-max, max]`, where `max` is the maximum (in absolute value) + (integer) number representable by the selected `dtype`. In practice, + we convert a normalized array of dtype `float32` into a normalized + one of dtype `int16`, as needed to create a PyDub `AudioSegment` + object.
+ """ + max_val = np.maximum(np.iinfo(dtype).max, np.abs(np.iinfo(dtype).min)) + + return (arr * max_val).astype(dtype) + + def to_pydub(self) -> pydub.AudioSegment: + """ + Converts audio to `pydub.AudioSegment`. + + Notes: + * In order to convert to PyDub `AudioSegment` type we need the + array to be + * of dtype int16, + * NOT normalized. + Therefore, if the array is normalized, we unnormalize it. + * In any case, PyDub only works with unnormalized arrays of dtype + int16, so that's what we need to pass as the input to + `AudioSegment`. + * Because we only manipulate mono audio, the array can either have + shape `(n_samples, 1)` or `(n_samples,)` (flat array). Either is + fine for PyDub (the extra dimension is taken care of + automatically for mono audio). + """ + if self._is_normalized: + unnormalized_array = self.unnormalize_array(self.array) + else: + unnormalized_array = self.array + + return pydub.AudioSegment( + unnormalized_array.tobytes(), + frame_rate=self.current_sr, + sample_width=unnormalized_array.dtype.itemsize, + channels=1, + ) + + +def transcribe_audio( + audio: np.ndarray, + # native_sr: int, + device, + batch_size: int, + compute_type: str, + language: str, + model_name_whisper: str, +) -> Tuple[str, List[Dict[str, Union[str, float]]]]: + """ + Transcribe audio using WhisperX, and return the text (transcription) and the words with their start and end times. + """ + + ## Load whisperx model. 
TODO: we should definitely avoid loading the model for *every* sample to transcribe + + device_type = device.type + device_index = device.index + + model_whisperx = whisperx.load_model( + model_name_whisper, + device=device_type, + device_index=device_index, + compute_type=compute_type, + language=language, + ) + + # required by whisperx + audio = audio.reshape( + -1, + ).astype(np.float32) + + result = model_whisperx.transcribe(audio, batch_size=batch_size) + model_a, metadata = whisperx.load_align_model( + language_code=result["language"], device=device_type + ) + model_a.to(device) + + ## Align timestamps + result = whisperx.align( + result["segments"], + model_a, + metadata, + audio, + device, + return_char_alignments=False, + ) + + if result is None or "segments" not in result or len(result["segments"]) == 0: + return "", [] + + if len(result["segments"]) == 1: + text = result["segments"][0]["text"] + words = result["segments"][0]["words"] + else: + text = " ".join( + result["segments"][i]["text"] for i in range(len(result["segments"])) + ) + words = [word for segment in result["segments"] for word in segment["words"]] + + # Remove words that are not properly transcribed + words = [word for word in words if "start" in word] + return text, words + + +def transcribe_audio_given_model( + model_whisperx, + audio_path: str, + batch_size: int = 2, + device: str = "cuda", +) -> Tuple[str, List[Dict[str, Union[str, float]]]]: + """ + Transcribe audio using whisperx, + and return the text (transcription) and the words with their start and end times.
+ """ + + ## Transcribe audio + audio = whisperx.load_audio(audio_path) + result = model_whisperx.transcribe(audio, batch_size=batch_size) + model_a, metadata = whisperx.load_align_model( + language_code=result["language"], device=device + ) + + ## Align timestamps + result = whisperx.align( + result["segments"], + model_a, + metadata, + audio, + device, + return_char_alignments=False, + ) + + if result is None or "segments" not in result or len(result["segments"]) == 0: + return "", [] + + if len(result["segments"]) == 1: + text = result["segments"][0]["text"] + words = result["segments"][0]["words"] + else: + text = " ".join( + result["segments"][i]["text"] for i in range(len(result["segments"])) + ) + words = [word for segment in result["segments"] for word in segment["words"]] + + # Remove words that are not properly transcribed + words = [word for word in words if "start" in word] + return text, words def pydub_to_np(audio: pydub.AudioSegment) -> Tuple[np.ndarray, int]: @@ -25,11 +239,6 @@ def pydub_to_np(audio: pydub.AudioSegment) -> Tuple[np.ndarray, int]: ) -def print_log(*args): - # This is just a wrapper to easily spot the print :) - I use it to debug - print(args) - - def plot_word_importance_summary( df_labels, top_k=15, @@ -244,9 +453,7 @@ def load_dataset_and_model(dataset_name, data_dir, model_dir=None, model_name=No ) from datasets import load_dataset - dataset_da = load_dataset( - "RiTA-nlp/ITALIC", "hard_speaker", use_auth_token=True - ) + dataset_da = load_dataset("RiTA-nlp/ITALIC", "hard_speaker", use_auth_token=True) dataset = pd.DataFrame( { diff --git a/pyproject.toml b/pyproject.toml index 7673e31..8709b80 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -43,12 +43,20 @@ lime = "^0.2.0.1" joblib = "^1.3.2" pytreebank = "^0.2.7" thermostat-datasets = "^1.1.0" +ipython = "^8.22.2" # Speech-XAI additional requirements to allow for `pip install ferret[speech]`. 
pydub = { version = "0.25.1", optional = true } audiomentations = { version = "0.34.1", optional = true } audiostretchy = { version = "1.3.5", optional = true } pyroomacoustics = { version = "0.7.3", optional = true } -whisperx = { version = "3.1.2", optional = true } +audio-effects = { version = "0.22", optional = true } + +# The version of WhisperX currently on PyPI has a problem with a dependency, +# so the dependency needs to be installed from the GitHub repo, which in turn +# prevents it from being used among the extras in pyproject.toml. Until a +# working version of WhisperX is released, the users are required to install +# it from the repo manually with: `pip install git+https://github.com/m-bain/whisperx.git` +# whisperx = { version = "3.1.2", optional = true } [tool.poetry.extras] speech = [ @@ -56,14 +64,16 @@ speech = [ "audiomentations", "audiostretchy", "pyroomacoustics", - "whisperx" + "audio-effects" + # "whisperx" ] all = [ "pydub", "audiomentations", "audiostretchy", "pyroomacoustics", - "whisperx" + "audio-effects" + # "whisperx" ]