From e5dc2978517428fd96ac96de94e07d6a57b04075 Mon Sep 17 00:00:00 2001 From: Giuseppe Attanasio Date: Wed, 6 Mar 2024 18:37:10 +0000 Subject: [PATCH 01/21] [WIP] initial implementation to support audio processing as arrays --- ferret/benchmark_speech.py | 49 ++++++++++------- .../loo_speech_explainer.py | 41 +++++++------- .../explanation_speech/utils_removal.py | 13 +++-- ferret/speechxai_utils.py | 55 ++++++++++++++++--- 4 files changed, 107 insertions(+), 51 deletions(-) diff --git a/ferret/benchmark_speech.py b/ferret/benchmark_speech.py index 9110c50..7537c1a 100644 --- a/ferret/benchmark_speech.py +++ b/ferret/benchmark_speech.py @@ -6,11 +6,15 @@ import seaborn as sns from IPython.display import display from .explainers.explanation_speech.loo_speech_explainer import LOOSpeechExplainer -from .explainers.explanation_speech.gradient_speech_explainer import GradientSpeechExplainer +from .explainers.explanation_speech.gradient_speech_explainer import ( + GradientSpeechExplainer, +) from .explainers.explanation_speech.lime_speech_explainer import LIMESpeechExplainer -from .explainers.explanation_speech.paraling_speech_explainer import ParalinguisticSpeechExplainer +from .explainers.explanation_speech.paraling_speech_explainer import ( + ParalinguisticSpeechExplainer, +) from .explainers.explanation_speech.explanation_speech import ExplanationSpeech -from .speechxai_utils import pydub_to_np, print_log +from .speechxai_utils import pydub_to_np, print_log, FerretAudio SCORES_PALETTE = sns.diverging_palette(240, 10, as_cmap=True) @@ -48,7 +52,9 @@ def __init__( self.model, self.feature_extractor, self.device, "en" ) elif "ITALIC" in self.model.name_or_path: - from .modeling.speech_model_helpers.model_helper_italic import ModelHelperITALIC + from .modeling.speech_model_helpers.model_helper_italic import ( + ModelHelperITALIC, + ) self.model_helper = ModelHelperITALIC( self.model, self.feature_extractor, self.device, "it" @@ -88,7 +94,8 @@ def predict( def explain( 
self, - audio_path: str, + audio_path_or_array: Union[str, np.ndarray], + native_sr: int = None, target_class: str = None, methodology: str = "LOO", perturbation_types: List[str] = [ @@ -112,16 +119,19 @@ def explain( Explain the prediction of the model. Returns the importance of each segment in the audio. """ - explainer_args = {} + explainer_args = dict() # TODO UNIFY THE INPUT FORMAT + # First things first. We transform any type of input in a suitable numpy array and we proceed with that on. + ferret_audio = FerretAudio(audio_path_or_array, native_sr=native_sr) + ## Get the importance of each class (action, object, location) according to the perturb_paraling type if methodology == "perturb_paraling": explanations = [] explainer = self.explainers["perturb_paraling"] for perturbation_type in perturbation_types: explanation = explainer.compute_explanation( - audio_path=audio_path, + audio=ferret_audio, target_class=target_class, perturbation_type=perturbation_type, verbose=verbose, @@ -136,7 +146,7 @@ def explain( else: if methodology not in self.explainers: raise ValueError( - f'Explainer {methodology} not supported. Choose between ' + f"Explainer {methodology} not supported. 
Choose between " '"LOO", "Gradient", "GradientXInput", "LIME", ' '"perturb_paraling"' ) @@ -151,7 +161,7 @@ def explain( explainer = self.explainers[methodology] explanation = explainer.compute_explanation( - audio_path=audio_path, + audio=ferret_audio, target_class=target_class, words_trascript=words_trascript, **explainer_args, @@ -185,9 +195,7 @@ def create_table( if explanations[i].target != explanations[i + 1].target ] == [], "The explanations must have the same target class" assert [ - True - for explanation in explanations - if len(explanation.features) > 1 + True for explanation in explanations if len(explanation.features) > 1 ] == [], "The explanation feature should only be one" importance_df = pd.DataFrame( [explanation.scores for explanation in explanations] @@ -240,10 +248,13 @@ def show_table(self, explanations, apply_style: bool = True, decimals=4): else table.apply(pd.to_numeric).style.format(precision=decimals) ) - def explain_variations(self, audio_path, perturbation_types, target_class=None): - perturbation_df_by_type = self.explainers[ - "perturb_paraling" - ].explain_variations(audio_path, perturbation_types, target_class) + def explain_variations( + self, audio_path_or_array, perturbation_types, target_class=None + ): + # TODO GA: we will probably need to update to the new FerretAudio class here as well + perturbation_df_by_type = self.explainers["perturb_paraling"].explain_variations( + audio_path_or_array, perturbation_types, target_class + ) return perturbation_df_by_type def plot_variations(self, perturbation_df_by_type, show_diff=False, figsize=(5, 5)): @@ -327,9 +338,7 @@ def plot_variations(self, perturbation_df_by_type, show_diff=False, figsize=(5, ax.set_xlabel( "signal-to-noise ratio (dB)", fontsize=label_size, labelpad=-2 ) - ax.set_xticks( - np.arange(len(x_labels)), labels=x_labels, fontsize=label_size - ) + ax.set_xticks(np.arange(len(x_labels)), labels=x_labels, fontsize=label_size) ax.set_title(perturbation_type, 
fontsize=label_size) @@ -348,4 +357,4 @@ def plot_variations(self, perturbation_df_by_type, show_diff=False, figsize=(5, cbar.ax.tick_params(labelsize=label_size) plt.show() - return fig \ No newline at end of file + return fig diff --git a/ferret/explainers/explanation_speech/loo_speech_explainer.py b/ferret/explainers/explanation_speech/loo_speech_explainer.py index 31a6d7f..5ac99d7 100644 --- a/ferret/explainers/explanation_speech/loo_speech_explainer.py +++ b/ferret/explainers/explanation_speech/loo_speech_explainer.py @@ -1,11 +1,15 @@ """LOO Speech Explainer module""" + import numpy as np from typing import Dict, List, Union, Tuple from pydub import AudioSegment from IPython.display import display from .explanation_speech import ExplanationSpeech from .utils_removal import transcribe_audio, remove_word -from ...speechxai_utils import pydub_to_np, print_log +from ...speechxai_utils import pydub_to_np, FerretAudio +from logging import getLogger + +logger = getLogger(__name__) class LOOSpeechExplainer: @@ -16,7 +20,7 @@ def __init__(self, model_helper): def remove_words( self, - audio_path: str, + audio: FerretAudio, removal_type: str = "nothing", words_trascript: List = None, display_audio: bool = False, @@ -30,10 +34,10 @@ def remove_words( """ ## Transcribe audio - + # TODO GA: transcribing audio might be an operation need by other explainers. 
I suggest we move it into FerretAudio or somewhere else such that can be done once and then shared (e.g., a method in the SpeechBenchmark class) if words_trascript is None: text, words_trascript = transcribe_audio( - audio_path=audio_path, + audio=audio, device=self.model_helper.device.type, batch_size=2, compute_type="float32", @@ -41,25 +45,25 @@ def remove_words( ) ## Load audio as pydub.AudioSegment - audio = AudioSegment.from_wav(audio_path) + pydub_segment = audio.to_pydub() ## Remove word - audio_no_words = [] + audio_no_words = list() for word in words_trascript: - audio_removed = remove_word(audio, word, removal_type) + audio_removed = remove_word(pydub_segment, word, removal_type) audio_no_words.append(pydub_to_np(audio_removed)[0]) if display_audio: - print_log(word["word"]) + print(word["word"]) display(audio_removed) return audio_no_words, words_trascript def compute_explanation( self, - audio_path: str, + audio: FerretAudio, target_class=None, removal_type: str = None, words_trascript: List = None, @@ -70,19 +74,22 @@ def compute_explanation( ## Get modified audio by leaving a single word out and the words modified_audios, words = self.remove_words( - audio_path, removal_type, words_trascript=words_trascript + audio, removal_type, words_trascript=words_trascript ) logits_modified = self.model_helper.predict(modified_audios) - audio = pydub_to_np(AudioSegment.from_wav(audio_path))[0] + # GA: we don't need this conversion as we already have the numpy audio array in FerretAudio + # audio = pydub_to_np(AudioSegment.from_wav(audio_path))[0] + audio_array = audio.array - logits_original = self.model_helper.predict([audio]) + logits_original = self.model_helper.predict([audio_array]) # Check if single label or multilabel scenario as for FSC n_labels = self.model_helper.n_labels # TODO + # TODO GA: what? 
if target_class is not None: targets = target_class @@ -100,9 +107,7 @@ def compute_explanation( if n_labels > 1: # Multilabel scenario as for FSC modified_trg = [logits_modified[i][:, targets[i]] for i in range(n_labels)] - original_gt = [ - logits_original[i][:, targets[i]][0] for i in range(n_labels) - ] + original_gt = [logits_original[i][:, targets[i]][0] for i in range(n_labels)] else: modified_trg = logits_modified[:, targets] @@ -112,9 +117,7 @@ def compute_explanation( if n_labels > 1: # Multilabel scenario as for FSC - prediction_diff = [ - original_gt[i] - modified_trg[i] for i in range(n_labels) - ] + prediction_diff = [original_gt[i] - modified_trg[i] for i in range(n_labels)] else: prediction_diff = [original_gt - modified_trg] @@ -125,7 +128,7 @@ def compute_explanation( scores=scores, explainer=self.NAME + "+" + removal_type, target=targets if n_labels > 1 else [targets], - audio_path=audio_path, + audio=audio, # TODO GA: I don't know if this is something we want to keep ) return explanation diff --git a/ferret/explainers/explanation_speech/utils_removal.py b/ferret/explainers/explanation_speech/utils_removal.py index 5cf7126..785c9e8 100644 --- a/ferret/explainers/explanation_speech/utils_removal.py +++ b/ferret/explainers/explanation_speech/utils_removal.py @@ -2,6 +2,7 @@ import whisperx import os from typing import Dict, List, Union, Tuple +from ...speechxai_utils import FerretAudio def remove_specified_words(audio, words, removal_type: str = "nothing"): @@ -51,7 +52,7 @@ def remove_specified_words(audio, words, removal_type: str = "nothing"): def transcribe_audio( - audio_path: str, + audio: FerretAudio, device: str = "cuda", batch_size: int = 2, compute_type: str = "float32", @@ -72,8 +73,10 @@ def transcribe_audio( ) ## Transcribe audio - audio = whisperx.load_audio(audio_path) - result = model_whisperx.transcribe(audio, batch_size=batch_size) + # TODO: we are assuming that the array does not come already normalized + audio_array = 
audio.normalized_array + + result = model_whisperx.transcribe(audio_array, batch_size=batch_size) model_a, metadata = whisperx.load_align_model( language_code=result["language"], device=device ) @@ -83,7 +86,7 @@ def transcribe_audio( result["segments"], model_a, metadata, - audio, + audio_array, device, return_char_alignments=False, ) @@ -170,6 +173,8 @@ def remove_word(audio, word, removal_type: str = "nothing"): after_word_audio = audio[word["end"] * 1000 + b :] word_duration = (word["end"] * 1000 - word["start"] * 1000) + a + b + # TODO GA: we don't really to use pydub here, we can use numpy directly + if removal_type == "nothing": replace_word_audio = AudioSegment.empty() elif removal_type == "silence": diff --git a/ferret/speechxai_utils.py b/ferret/speechxai_utils.py index 0c28560..3ca7814 100644 --- a/ferret/speechxai_utils.py +++ b/ferret/speechxai_utils.py @@ -7,6 +7,52 @@ import torch from datasets import Dataset from transformers import Wav2Vec2ForSequenceClassification, Wav2Vec2FeatureExtractor +import librosa +from typing import Union + + +class FerretAudio: + """ + Internal class to handle audio data. We force signal to 1) mono, 2) a sampling rate of 16000, 3) np.float32 (i.e., 4 bytes to represent each sample). + We infer the native sampling rate using librosa. 
+ """ + + def __init__( + self, audio_path_or_array: Union[str, np.ndarray], native_sr: int = None + ): + self.target_sr = 16000 + self.native_sr = native_sr + self.audio_path_or_array = audio_path_or_array + + if isinstance(audio_path_or_array, str): + self.native_sr = librosa.get_samplerate(audio_path_or_array) + self.array, self.sample_rate = librosa.load( + audio_path_or_array, sr=self.target_sr, dtype=np.float32 + ) + + elif isinstance(audio_path_or_array, np.ndarray): + if native_sr is None: + raise ValueError( + "If audio is provided as a numpy array, native_sr must be provided" + ) + self.array, self.sample_rate = librosa.resample( + audio_path_or_array, self.native_sr, self.target_sr + ) + + @property + def normalized_array(self) -> np.ndarray: + return self.array / 32768.0 + + def to_pydub(self) -> pydub.AudioSegment: + """ + Converts audio to pydub.AudioSegment. + """ + return pydub.AudioSegment( + self.array.tobytes(), + frame_rate=self.target_sr, + sample_width=self.array.dtype.itemsize, + channels=1, + ) def pydub_to_np(audio: pydub.AudioSegment) -> Tuple[np.ndarray, int]: @@ -25,11 +71,6 @@ def pydub_to_np(audio: pydub.AudioSegment) -> Tuple[np.ndarray, int]: ) -def print_log(*args): - # This is just a wrapper to easily spot the print :) - I use it to debug - print(args) - - def plot_word_importance_summary( df_labels, top_k=15, @@ -244,9 +285,7 @@ def load_dataset_and_model(dataset_name, data_dir, model_dir=None, model_name=No ) from datasets import load_dataset - dataset_da = load_dataset( - "RiTA-nlp/ITALIC", "hard_speaker", use_auth_token=True - ) + dataset_da = load_dataset("RiTA-nlp/ITALIC", "hard_speaker", use_auth_token=True) dataset = pd.DataFrame( { From ed15ea15e622fd4c1a295a361e265b8c3db8947e Mon Sep 17 00:00:00 2001 From: Gaia Geagea Date: Fri, 15 Mar 2024 08:44:40 +0100 Subject: [PATCH 02/21] proposed changes to integrate numpy arrays --- ferret/benchmark_speech.py | 2 +- .../gradient_speech_explainer.py | 16 +++--- 
.../lime_speech_explainer.py | 19 +++---- .../loo_speech_explainer.py | 15 +++--- .../explanation_speech/utils_removal.py | 51 +++++++++++++++++-- ferret/speechxai_utils.py | 33 +++++++++++- 6 files changed, 100 insertions(+), 36 deletions(-) diff --git a/ferret/benchmark_speech.py b/ferret/benchmark_speech.py index 7537c1a..451f089 100644 --- a/ferret/benchmark_speech.py +++ b/ferret/benchmark_speech.py @@ -123,7 +123,7 @@ def explain( # TODO UNIFY THE INPUT FORMAT # First things first. We transform any type of input in a suitable numpy array and we proceed with that on. - ferret_audio = FerretAudio(audio_path_or_array, native_sr=native_sr) + ferret_audio = FerretAudio(audio_path_or_array, native_sr=native_sr, model_helper=self.explainers.model_helper) ## Get the importance of each class (action, object, location) according to the perturb_paraling type if methodology == "perturb_paraling": diff --git a/ferret/explainers/explanation_speech/gradient_speech_explainer.py b/ferret/explainers/explanation_speech/gradient_speech_explainer.py index a3b8efe..f1cc22b 100644 --- a/ferret/explainers/explanation_speech/gradient_speech_explainer.py +++ b/ferret/explainers/explanation_speech/gradient_speech_explainer.py @@ -4,10 +4,8 @@ import numpy as np import torch from .explanation_speech import ExplanationSpeech -from ...speechxai_utils import pydub_to_np +from ...speechxai_utils import pydub_to_np, FerretAudio # TODO - include in utils -from .loo_speech_explainer import transcribe_audio - class GradientSpeechExplainer: NAME = "Gradient" @@ -58,7 +56,7 @@ def _get_gradient_importance_frame_level( def compute_explanation( self, - audio_path: str, + audio: FerretAudio, target_class=None, words_trascript: List = None, no_before_span: bool = True, @@ -79,10 +77,10 @@ def compute_explanation( ) # Load audio and convert to np.array - audio = pydub_to_np(AudioSegment.from_wav(audio_path))[0] + audio_array = audio.array # Predict logits/probabilities - logits_original = 
self.model_helper.predict([audio]) + logits_original = self.model_helper.predict([audio_array]) # Check if single label or multilabel scenario as for FSC n_labels = self.model_helper.n_labels @@ -103,9 +101,7 @@ def compute_explanation( if words_trascript is None: # Transcribe audio - _, words_trascript = transcribe_audio( - audio_path=audio_path, language=self.model_helper.language - ) + words_trascript = audio.transcribe # Compute gradient importance for each target label # This also handles the multilabel scenario as for FSC @@ -181,7 +177,7 @@ def compute_explanation( scores=scores, explainer=self.NAME + "-" + aggregation, target=targets if n_labels > 1 else targets, - audio_path=audio_path, + audio_path=audio, ) return explanation \ No newline at end of file diff --git a/ferret/explainers/explanation_speech/lime_speech_explainer.py b/ferret/explainers/explanation_speech/lime_speech_explainer.py index 18fe41c..4a383ac 100644 --- a/ferret/explainers/explanation_speech/lime_speech_explainer.py +++ b/ferret/explainers/explanation_speech/lime_speech_explainer.py @@ -4,7 +4,7 @@ from .lime_timeseries import LimeTimeSeriesExplainer from .utils_removal import transcribe_audio from .explanation_speech import ExplanationSpeech -from ...speechxai_utils import pydub_to_np +from ...speechxai_utils import pydub_to_np, FerretAudio EMPTY_SPAN = "---" @@ -17,7 +17,7 @@ def __init__(self, model_helper): def compute_explanation( self, - audio_path: str, + audio: FerretAudio, target_class=None, words_trascript: List = None, removal_type: str = "silence", @@ -37,10 +37,9 @@ def compute_explanation( ) # Load audio and convert to np.array - audio = pydub_to_np(AudioSegment.from_wav(audio_path))[0] - + audio_array = audio.array # Predict logits/probabilities - logits_original = self.model_helper.predict([audio]) + logits_original = self.model_helper.predict([audio_array]) # Check if single label or multilabel scenario as for FSC n_labels = self.model_helper.n_labels @@ -61,13 +60,11 
@@ def compute_explanation( if words_trascript is None: # Transcribe audio - _, words_trascript = transcribe_audio( - audio_path=audio_path, language=self.model_helper.language - ) - audio_np = audio.reshape(1, -1) + words_trascript = audio.transcribe + audio_np = audio_array.reshape(1, -1) # Get the start and end indexes of the words. These will be used to split the audio and derive LIME interpretable features - tot_len = audio.shape[0] + tot_len = audio_array.shape[0] sampling_rate = self.model_helper.feature_extractor.sampling_rate splits = [] old_start = 0 @@ -143,7 +140,7 @@ def compute_explanation( scores=scores, explainer=self.NAME + "+" + removal_type, target=targets if n_labels > 1 else targets, - audio_path=audio_path, + audio_path=audio, ) return explanation \ No newline at end of file diff --git a/ferret/explainers/explanation_speech/loo_speech_explainer.py b/ferret/explainers/explanation_speech/loo_speech_explainer.py index 5ac99d7..9b1a704 100644 --- a/ferret/explainers/explanation_speech/loo_speech_explainer.py +++ b/ferret/explainers/explanation_speech/loo_speech_explainer.py @@ -5,7 +5,7 @@ from pydub import AudioSegment from IPython.display import display from .explanation_speech import ExplanationSpeech -from .utils_removal import transcribe_audio, remove_word +from .utils_removal import remove_word, remove_word_np from ...speechxai_utils import pydub_to_np, FerretAudio from logging import getLogger @@ -35,14 +35,9 @@ def remove_words( ## Transcribe audio # TODO GA: transcribing audio might be an operation need by other explainers. 
I suggest we move it into FerretAudio or somewhere else such that can be done once and then shared (e.g., a method in the SpeechBenchmark class) + # transcription moved to the FerretAudio Class if words_trascript is None: - text, words_trascript = transcribe_audio( - audio=audio, - device=self.model_helper.device.type, - batch_size=2, - compute_type="float32", - language=self.model_helper.language, - ) + words_trascript = audio.transcribe ## Load audio as pydub.AudioSegment pydub_segment = audio.to_pydub() @@ -53,6 +48,10 @@ def remove_words( for word in words_trascript: audio_removed = remove_word(pydub_segment, word, removal_type) + # to use remove_word_np after implementing the numpy array version of pink noise and white noise + # audio_removed = remove_word_np(audio.array, audio.sample_rate, word, removal_type ) + # audio_no_words.append(audio_removed) + audio_no_words.append(pydub_to_np(audio_removed)[0]) if display_audio: diff --git a/ferret/explainers/explanation_speech/utils_removal.py b/ferret/explainers/explanation_speech/utils_removal.py index 785c9e8..37249b4 100644 --- a/ferret/explainers/explanation_speech/utils_removal.py +++ b/ferret/explainers/explanation_speech/utils_removal.py @@ -1,6 +1,7 @@ from pydub import AudioSegment import whisperx import os +import numpy as np from typing import Dict, List, Union, Tuple from ...speechxai_utils import FerretAudio @@ -52,7 +53,7 @@ def remove_specified_words(audio, words, removal_type: str = "nothing"): def transcribe_audio( - audio: FerretAudio, + audio: np.ndarray, device: str = "cuda", batch_size: int = 2, compute_type: str = "float32", @@ -74,9 +75,10 @@ def transcribe_audio( ## Transcribe audio # TODO: we are assuming that the array does not come already normalized - audio_array = audio.normalized_array + # audio_array = audio.normalized_array + # The normalization occurs in the FerretAudio Class - result = model_whisperx.transcribe(audio_array, batch_size=batch_size) + result = 
model_whisperx.transcribe(audio, batch_size=batch_size) model_a, metadata = whisperx.load_align_model( language_code=result["language"], device=device ) @@ -86,7 +88,7 @@ def transcribe_audio( result["segments"], model_a, metadata, - audio_array, + audio, device, return_char_alignments=False, ) @@ -191,3 +193,44 @@ def remove_word(audio, word, removal_type: str = "nothing"): audio_removed = before_word_audio + replace_word_audio + after_word_audio return audio_removed + + +def remove_word_np(audio_array, sr, word, removal_type: str = "nothing"): + """ + Remove a word from audio as an array, by replacing it with: + - nothing + - silence + - white noise + - pink noise + + Args: + audio_array (np.ndarray): audio_array + sr : sample rate of audio + word: word to remove with its start and end times + removal_type (str, optional): type of removal. Defaults to "nothing". + """ + + a, b = 100, 40 + + start = int((word["start"] * 1000 - a) * sr / 1000) + end = int((word["end"] * 1000 + b) * sr / 1000) + before_word_audio = audio_array[:start] + after_word_audio = audio_array[end:] + word_duration = (end - start) + a + b + + if removal_type == "nothing": + replace_word_audio = np.array([], dtype=audio_array.dtype) + + elif removal_type == "silence": + replace_word_audio = np.zeros(word_duration, dtype=audio_array.dtype) + + elif removal_type == "pink noise": + pass # to change the pink_noise.mp3 to a numpy array + + elif removal_type == "white noise": + pass # to change the white_noise.mp3 tp a numpy array + + audio_removed = np.concatenate( + [before_word_audio, replace_word_audio, after_word_audio] + ) + return audio_removed diff --git a/ferret/speechxai_utils.py b/ferret/speechxai_utils.py index 3ca7814..087e1c6 100644 --- a/ferret/speechxai_utils.py +++ b/ferret/speechxai_utils.py @@ -9,6 +9,7 @@ from transformers import Wav2Vec2ForSequenceClassification, Wav2Vec2FeatureExtractor import librosa from typing import Union +from explainers.explanation_speech.utils_removal 
import transcribe_audio class FerretAudio: @@ -18,11 +19,16 @@ class FerretAudio: """ def __init__( - self, audio_path_or_array: Union[str, np.ndarray], native_sr: int = None + self, + audio_path_or_array: Union[str, np.ndarray], + native_sr: int = None, + model_helper=None, ): self.target_sr = 16000 self.native_sr = native_sr self.audio_path_or_array = audio_path_or_array + self.model_helper = model_helper + self._transcribe = None if isinstance(audio_path_or_array, str): self.native_sr = librosa.get_samplerate(audio_path_or_array) @@ -39,10 +45,33 @@ def __init__( audio_path_or_array, self.native_sr, self.target_sr ) + @property + def is_normalized(self) -> bool: + """Check if the array is already normalized.""" + return np.max(np.abs(self.array)) <= 1.0 + @property def normalized_array(self) -> np.ndarray: - return self.array / 32768.0 + if not self.is_normalized: + return self.array / 32768.0 + else: + return self.array + @property + def transcribe(self): + if self._transcribe is None: + if self.model_helper and hasattr(self.model_helper, 'device') and hasattr(self.model_helper, 'language'): + _ , self._transcribe = transcribe_audio( + audio=self.normalized_array, # is normalization needed when transcribing? i am assumimg so + device=self.model_helper.device.type, + batch_size=2, + compute_type="float32", + language=self.model_helper.language, + ) + else: + raise AttributeError("model_helper is not correctly configured") + return self._transcribe + def to_pydub(self) -> pydub.AudioSegment: """ Converts audio to pydub.AudioSegment. 
From 2ff3f2a8fc275c8a4bb2815e719120e2a7841ec0 Mon Sep 17 00:00:00 2001 From: emanuele-moscato Date: Fri, 15 Mar 2024 14:47:52 +0100 Subject: [PATCH 03/21] Attach console handler to logger, fix bugs, move audio transcription to the speechxai_utils.py module --- examples/speech/getting_started.ipynb | 647 +----------------- ferret/__init__.py | 34 +- ferret/benchmark_speech.py | 9 +- .../faithfulness_measures_speech.py | 11 +- .../gradient_equal_width_explainer.py | 2 - .../equal_width/lime_equal_width_explainer.py | 1 - .../lime_speech_explainer.py | 3 +- .../paraling_speech_explainer.py | 12 +- .../explanation_speech/utils_removal.py | 106 --- ferret/speechxai_utils.py | 110 ++- pyproject.toml | 12 +- 11 files changed, 191 insertions(+), 756 deletions(-) diff --git a/examples/speech/getting_started.ipynb b/examples/speech/getting_started.ipynb index 5af5944..b8503e2 100644 --- a/examples/speech/getting_started.ipynb +++ b/examples/speech/getting_started.ipynb @@ -9,7 +9,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -19,19 +19,9 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/moscato/miniconda3/envs/speechxai-ferret-integration-2/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n", - "torchvision is not available - cannot save figures\n" - ] - } - ], + "outputs": [], "source": [ "from datasets import Dataset\n", "from IPython.display import display\n", @@ -66,7 +56,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -89,17 +79,9 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "cuda:0\n" - ] - } - ], + "outputs": [], "source": [ "device_str = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n", "device = torch.device(device_str)\n", @@ -109,21 +91,9 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Some weights of the model checkpoint at superb/wav2vec2-base-superb-ic were not used when initializing Wav2Vec2ForSequenceClassification: ['wav2vec2.encoder.pos_conv_embed.conv.weight_g', 'wav2vec2.encoder.pos_conv_embed.conv.weight_v']\n", - "- This IS expected if you are initializing Wav2Vec2ForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", - "- This IS NOT expected if you are initializing Wav2Vec2ForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", - "Some weights of Wav2Vec2ForSequenceClassification were not initialized from the model checkpoint at superb/wav2vec2-base-superb-ic and are newly initialized: ['wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original0', 'wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original1']\n", - "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" - ] - } - ], + "outputs": [], "source": [ "## Load model\n", "model = Wav2Vec2ForSequenceClassification.from_pretrained(\n", @@ -146,7 +116,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -156,27 +126,9 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "## Example\n", "# 'transcription': 'Turn up the bedroom heat.'\n", @@ -207,127 +159,12 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.0.post0. 
To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../../.cache/torch/whisperx-vad-segmentation.bin`\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", - "Model was trained with torch 1.10.0+cu102, yours is 2.2.0+cu121. Bad things might happen unless you revert torch to 1.x.\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
 Turnupthebedroomheat.
action=increase0.2510.5450.2430.1300.021
object=heat-0.000-0.000-0.0000.0140.412
location=bedroom0.0020.0060.0820.9970.242
\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "explanation = benchmark.explain(\n", - " audio_path=audio_path, \n", + " audio_path_or_array=audio_path, \n", " methodology='LOO'\n", ")\n", "\n", @@ -336,128 +173,9 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.0.post0. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../../.cache/torch/whisperx-vad-segmentation.bin`\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", - "Model was trained with torch 1.10.0+cu102, yours is 2.2.0+cu121. Bad things might happen unless you revert torch to 1.x.\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
 Turnupthebedroomheat.
action=increase0.1550.2730.1170.2810.149
object=heat0.0550.0150.065-0.0070.211
location=bedroom-0.065-0.0050.2530.7070.036
\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "explanation = benchmark.explain(\n", " audio_path=audio_path, \n", @@ -469,51 +187,9 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.0.post0. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../../.cache/torch/whisperx-vad-segmentation.bin`\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", - "Model was trained with torch 1.10.0+cu102, yours is 2.2.0+cu121. Bad things might happen unless you revert torch to 1.x.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.0.post0. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../../.cache/torch/whisperx-vad-segmentation.bin`\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", - "Model was trained with torch 1.10.0+cu102, yours is 2.2.0+cu121. 
Bad things might happen unless you revert torch to 1.x.\n" - ] - }, - { - "data": { - "text/plain": [ - "(EvaluationSpeech(name='aopc_compr_speech', score=[0.8124997764127329, 0.14093613624572754, 0.9970740624897493], target=[3, 4, 2]),\n", - " EvaluationSpeech(name='aopc_suff', score=[0.624854679661803, 0.01358117163181305, 0.10568535327911377], target=[3, 4, 2]))" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "aopc_compr = AOPC_Comprehensiveness_Evaluation_Speech(benchmark.model_helper)\n", "evaluation_output_c = aopc_compr.compute_evaluation(explanation)\n", @@ -540,7 +216,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -549,25 +225,9 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.0.post0. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../../.cache/torch/whisperx-vad-segmentation.bin`\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", - "Model was trained with torch 1.10.0+cu102, yours is 2.2.0+cu121. 
Bad things might happen unless you revert torch to 1.x.\n" - ] - } - ], + "outputs": [], "source": [ "text, words_trascript = transcribe_audio(\n", " audio_path=audio_path,\n", @@ -580,109 +240,9 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
 Turnupthebedroomheat.
action=increase0.2510.5450.2430.1300.021
object=heat-0.000-0.000-0.0000.0140.412
location=bedroom0.0020.0060.0820.9970.242
\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "explanation = benchmark.explain(\n", " audio_path=audio_path, \n", @@ -703,149 +263,9 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
 pitch shiftingpitch shifting downpitch shifting uptime stretchingtime stretching downtime stretching upreverberationnoise
action=increase0.080.040.130.110.190.040.750.44
object=heat0.02-0.000.040.000.000.000.000.29
location=bedroom0.220.130.330.020.030.010.420.60
\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "explain_table = benchmark.explain(\n", " audio_path=audio_path,\n", @@ -863,7 +283,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -876,20 +296,9 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAcUAAAGZCAYAAAD4jSoIAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/H5lhTAAAACXBIWXMAAA9hAAAPYQGoP6dpAACZvklEQVR4nOzdd1xV9f/A8dcF4YIXZIgDFHGiKG7NLQg40NRMcZZilpnmwj0KtRylJmX5teFX86uWojhSSTIhM3HUT8tclRtHLoYLFPj8/iBOXtnIlPezx3nkPffz+ZzPORfum885n6FTSimEEEIIgUlhV0AIIYQoKiQoCiGEEP+QoCiEEEL8Q4KiEEII8Q8JikIIIcQ/JCgKIYQQ/5CgKIQQQvxDgqIQQgjxDwmKQgghxD8kKIoiZ8uWLSxbtizNfn9/f9zd3QuhRtkXExPDrFmzOHHiRJ6Wm9E1ycr58+fR6XRs3Lgx03SzZs3Cysoqt9UT4pkhQVEUORkFgLfeeot169YVQo2yLyYmhtmzZxeZoJhdr776KuHh4flWvhDFRanCroAQ2VWjRo3CrkKeevDgAZaWloVdDQAqV65M5cqVC7saQhQ6aSmKIsXf358vv/yS48ePo9Pp0Ol0+Pv7a+89fvt01apV6HQ6fv75Zzp16kTp0qWpXbs2u3fvJjk5mZkzZ1KhQgUqVKjAtGnTSE5ONjrWyZMn6dmzJzY2NhgMBrp168aZM2eyrOOCBQuoWbMmFhYWlCtXDh8fH86dO8f58+epVq0aAH5+flr9z58/r93GXLVqFa+99hply5blueeeAyAhIYHp06fj4uKCXq/Hzc3NqEWc2TUBiIyMpFOnTpQpUwZra2tatGjBd999Z1Tn+Ph43nzzTezs7HB0dGTixIkkJiZq7z95+zQiIgKdTsd3333HwIEDsba2xsXFhffffz/N9fj0009xcXGhdOnSdOzYkSNHjmjnKkRxIy1FUaS89dZb3Lhxg1OnTrF27VoAypUrl2mewYMHM2LECCZMmMCCBQt48cUXGTJkCHFxcaxevZqDBw8SGBhI/fr1GThwIABnz56ldevWuLu7s2rVKkxMTJg7dy7e3t6cPn0avV6f7rFWr17NW2+9xZw5c2jVqhWxsbH8+OOPxMXFUadOHUJCQnjxxReZN28eHTp0AMDR0ZGrV68CMG3aNLp168ZXX32lBem+ffuyb98+AgMDcXNzY+fOnbz00kvY2dnh6+ub6TX56aef8PLyomXLlnzxxRfY2try888/c/HiRaN6z5gxg549e7Jhwwb279/PrFmzqFmzJiNGjMj02o4YMYKXX36ZzZs3s2XLFqZMmUKDBg3o0qULANu2bWPEiBG8+uqr9OnTh6NHj9K3b99MyxSiSFNCFDFDhgxR9erVy3L/ypUrFaCWLVum7Tt27JgCVMuWLY3yNm3aVL3wwgva68GDB6vq1aurBw8eaPuuX7+urKys1CeffJJh3UaNGqWaNGmS4fv
nzp1TgAoODk53f5cuXYz279mzRwFq165dRvv79eunmjdvnuG5p2rdurWqW7euSkxMzLQ+fn5+Rvs9PDyUt7e39jowMFAZDAbtdXh4uALUpEmTtH3JycmqatWqatiwYdq+5s2bKy8vL6Oy33nnHQWolStXplsnIYoyuX0qir2OHTtq/3Z1dQXA29vbKI2rqyuXLl3SXoeFhdGjRw9KlSpFYmIiiYmJ2NnZ0bhxYw4fPpzhsZo0acKRI0cICAhg3759PHr0KEd17datm9HrsLAw7O3t8fLy0uqRmJio3YZMSkrKsKz79+9z4MABhgwZgqmpaabH7dSpk9HrunXrEhUVlWV9H8+n0+lwc3PT8iUlJXHkyBF69OhhlKdnz55ZlitEUSVBURR7tra22r/Nzc3T7EvdHx8fr72+efMmQUFBmJmZGW0//vijUfB8kr+/P0uWLGHXrl20a9eOcuXKMXbsWB48eJCtulaoUMHo9c2bN7l9+3aaerz66qskJiZqt13TEx0dTXJyMk5OTlkeN6vrkZt8N27cIDExMc3t7fLly2dZrhBFlTxTFCWSvb093bp1Y+TIkWnes7a2zjCfiYkJY8eOZezYsVy+fJmvv/6aqVOn4uDgwFtvvZXlcXU6XZp6lCtXjp07d6abPrMAY2tri4mJCVeuXMnyuPmhXLlylCpVihs3bhjtv379eqHUR4i8IEFRFDnZbcU8DR8fH37//XcaN26c5a3HjFSqVIkJEyawbt06Tp48CfzbUs1u/X18fHj//fcxNzenQYMGGaZL75oYDAZatWrF6tWrmTBhQq7PI7dMTU1p3LgxW7duZezYsdr+LVu2FGg9hMhLEhRFkePm5sZ///tfvvrqK2rVqoWDgwNVq1bN02PMnj2b5s2b07lzZ4YPH06FChW4du0aP/zwA+3atWPAgAHp5nv99dexs7OjZcuW2NnZ8dNPP/Hrr79qLc6KFStia2vLV199RbVq1dDr9ZkGu44dO9K9e3e6dOnC5MmTadCgAffu3eP48eP89ddffPHFF5lekwULFuDl5YWPjw8jR47Ezs6O//u//8PBwYFXXnklT69ZembOnEnPnj157bXX8PPz48iRI3z55ZdASqtaiOJGfmpFkTNs2DD8/PwYPXo0zZs3Z9asWXl+jJo1a3Lo0CHKli3LyJEj6dy5M1OnTuXevXuZBrHWrVuzb98+hg0bRpcuXVi7di1Llixh2LBhQEogWLlyJefOncPb25vmzZtneXtz48aNjBgxgmXLluHr68uwYcMICwvDw8NDS5PRNWnbtq02ptDf358XX3yRzZs34+Li8vQXKRt69OjBf/7zH3bt2kXPnj0JDQ3lP//5DwA2NjYFUgch8pJOKaUKuxJCiGfHihUrePXVVzl37lyet/CFyG9y+1QIkWu3b99m9uzZeHl5YW1tzeHDh5k7dy49e/aUgCiKJQmKQohcMzMz48yZM6xbt46YmBjKlSvHyy+/zHvvvVfYVRMiV+T2qRBCCPEP6WgjhBBC/EOCohBCCPEPeaaYj5KTk7ly5QrW1tZpZjIRQognKaW4c+cOTk5OMs6zkEhQzEdXrlzB2dm5sKshhChmLl26JIs+FxIJivkodQ7NDQ4jKW2S/vp8GWm47OVcHdOySu4GTKtyFrnKB6BLzl1frSTLXP74lYA/oE3vJmadKB1JpXN3TXP7GRY0ZVrwd1x0Sbm7Nrmp6507d6jpWivT+XdF/nqqoOjv78/PP//M77//nlf1eaak3jItbaLHkMOgWKa0VdaJ0mFplbtfJlXGMlf5IPdfGrn9Ai8RQVGXy6BokKCY1woyKGrHlMctheapguJbb73FvXv38qouQgghRKF6qqBYo0aNvKpHph48eIClZe5bMkIIIUR2PNWNKH9/f9zd3QFYtWoVOp2OI0eO4Ovri8FgoFatWqxevTpNvh07dtCmTRtKly6NnZ0dnp6eHDlyBECb3HjHjh306dOHMmXK4OfnB0BMTAw
jR47E0dERvV5P06ZNCQsLS1N2x44dKV++PGXKlKFFixZ8++23RmliYmJ47bXXqFSpEhYWFjg7O9O/f3+jNFFRUbz00ks4ODhgaWlJ+/bt+eWXX57mcgkhhCji8vzpzKBBg+jUqRNbtmyhcePG+Pv7a2vNAaxfv57u3btTvnx51q1bx9q1a2nTpg2XL182Kmf48OHUqFGDzZs3M3HiRB4+fEjHjh3Zvn07c+fOZdu2bdStW5du3bpx7NgxLd+5c+fo3r07//vf/9i0aRNt2rSha9euREREaGkCAgLYvn078+bNY9euXSxcuBC9/t9nftHR0bRt25ajR4+ydOlSNm3ahMFgwMvLK9MFVBMSEoiLizPahBBCFB953vv0zTff1NaWa926NTt27GDTpk3MnDkTpRQTJ06kU6dObN68WcvTtWvXNOX06NHDaP7ElStXcvToUX799Vfq1q0LQOfOnfnzzz9555132LBhg3b8VMnJyXTo0IHjx4/z2Wef4enpCcChQ4cYOHAgQ4YM0dI+3lIMCgoiJiaGQ4cOaSufe3t74+rqyqJFi3j//ffTPff58+cze/bsHF0vIYQQRUeetxQ7deqk/dtgMODi4kJUVBQAp0+fJioqKluLn3br1s3odVhYGPXr18fV1ZXExERt69ixI4cPH9bSRUVFMWTIECpVqkSpUqUwMzMjLCyMP/74Q0vTpEkTVq1axaJFi9LtORsWFkaHDh2wt7fXjmNqaoqHh4fRsZ40bdo0YmNjte3SpUtZnqcQQoiiI89bira2tkavzc3NiY+PB+DWrVsAODk5ZVlOhQoVjF7fvHmTI0eOYGZmliatqakpkNIy7NGjB7GxscyZM4eaNWtiMBh4++23uXjxopZ+6dKl2Nvbs3jxYiZNmoSzszPTpk3jjTfe0I514MCBdI+VWecivV5vdBtWCCFE8VKgg/fLli0LkOVK5JB2nI69vT0NGjRgxYoVGeb566+/OHLkCFu2bKFnz57a/gcPHhils7GxISgoiKCgII4dO8aHH37IyJEjcXd3p127dtjb29OlSxfeeeedNMeQoCeEEM+uAg2KtWvXpnLlyqxcuZK+ffvmKK+Pjw87d+7Eyckpw5ZmavAzNzfX9l24cIGffvoJV1fXdPPUr1+fJUuWsGLFCk6ePEm7du3w8fFhzZo1uLm5YTAYclRPIYQQxVeBBkWdTseiRYsYMGAAvXv3ZvDgwej1eiIjI2nevDnPP/98hnkHDx7Mp59+iqenJxMnTsTV1ZWYmBiOHDnCw4cPmT9/PnXq1KFy5cpMnTqVpKQk7t69S2BgIJUqVTIqq02bNvTq1Qt3d3dMTU1ZvXo15ubmtGvXDkjpnbp27Vo8PDwYO3YsVapU4caNGxw8eBAnJyfGjx+fr9dJCCFE4SjwuU/79etH6dKlmTt3Lv3798fCwoImTZrQq1evTPPp9Xr27NnDrFmzmDt3LlevXsXBwYHGjRtrvV31ej0hISGMGjUKPz8/nJ2dmTlzJnv27OHnn3/WymrTpg2rV6/m3LlzmJiYUL9+fb755hvc3NyAlNu8Bw4cYObMmUyZMoVbt25Rvnx5WrZsmWU9hRBCFF86pVTxmPSwGIqLi8PGxoa/r12jTJkyOcqb2w8ltzMmloQfgoK+NiVh9kq5NnkrLi6OChUrEhsbm+PvDJE3SsDUykIIIUT2FNug+PgUc/ktJiaGWbNmceLEiQI5nhBCiMJRbINiQYqJiWH27NkSFIUQ4hknQVEIIYT4R7EPihERETRu3BiDwcBzzz1ntJKFUopFixbh6uqKXq+nevXqLFmyxCj/qVOn6N+/P87OzpQuXZq6deuyePFikpOTATh//jzVqlUDwM/PD51Oh06n4/z58wV2jkIIIQpGgQ/JyEvXrl1jzJgxTJ06FRsbG6ZNm0avXr04c+YMZmZmjB07li+++IIZM2bQokUL9u/fz5QpU7C0tGTEiBEAXL58mdq1azNo0CCsra05evQogYG
B2hhHR0dHQkJCePHFF5k3bx4dOnQAwNHRMU19EhISSEhI0F7LKhlCCFG8FOugePv2bX744Qfq1asHpExA3qFDBw4ePIijoyMff/wxy5cvZ/jw4UDKrDj3799n9uzZDB8+HBMTE7y9vfH29gZSWpZt27bl/v37fPzxxwQGBqLX62ncuDEAtWrVomXLlhnWR1bJEEKI4q1Y3z51cnLSAiKgLSkVFRXF7t27Aejdu7fRqho+Pj5cu3ZNW8EiPj6ewMBAatasiV6vx8zMjBkzZnD16lXu3r2bo/rIKhlCCFG8FeuWYnorckBKoLt58yZKKRwcHNLNe+nSJVxcXJgyZQqff/45gYGBNG3aFFtbW7Zu3cq7775LfHw8VlZW2a6PrJIhhBDFW7EOipmxt7dHp9Oxb98+ownCU9WuXRuA4OBgXn/9daZMmaK9t2PHjgKrpxBCiKLjmQ2Kqc8Jb926Rffu3TNM9+DBA6OgmZSUxNdff22U5vEWqBBCiGfXMxsUXV1dGTVqFC+//DKTJk2iRYsWPHr0iD/++IPw8HC2bNkCQMeOHfn888+pW7cuDg4OLFu2zKgHKUDFihWxtbXlq6++olq1auj1eho0aJBuC1QIIUTxVaw72mTlo48+4t133+Xrr7+mW7duvPTSS6xfvx4PDw8tzdKlS/Hw8GD06NEMGzaM+vXrM336dKNyTExMWLlyJefOncPb25vmzZtna6FkIYQQxYuskpGPUlfJuPlXFGWsczbjfZIhd414kwdJucqXbDDNVb6nISssZKK4/FYWpw8jKZcX1TSXJ5mLw8XFxVHBUVbJKEzPdEtRCCGEyIkcBcX8XpmiUaNG+Pv751v5QgghRGakpSiEEEL8o1gFRaVUmp6hQgghRF7JVVAMDQ3F3d0dCwsLmjZtyoEDB4zeX7VqFQ0aNMDCwoJKlSoxY8YMkpKMO4Ds37+fpk2bYmFhgbu7O6GhoWmOk3q7dufOnTRs2BC9Xs8333wDQEhICI0aNcLCwgInJycCAgLSjCO8cOECffr0wcbGBoPBQOfOnTl27JhRmqpVq/Lmm28SFBSEs7Mz1tbW+Pv7k5CQwNGjR2nTpo22AseTeYUQQjxbctzF8erVq4wcOZJZs2ZhZ2fHggUL6Ny5M3/++Sfly5fngw8+YPLkyYwfP57Fixdz8uRJLSguWLAASFndonPnztSvX58NGzYQHR3NG2+8wb1792jUqJHR8a5cucKYMWOYOXMmVapUoUqVKmzbto0+ffrQv39/FixYwKlTp5g+fToXL15k48aNANy5cwdPT09MTExYvnw5FhYWzJ07l/bt2/Pbb7/h7OysHWPr1q24u7vz6aefcvbsWQICAjA3NycyMpKAgAAqVKjAlClT8PPz48SJE5iYpP+3hKySIYQQxVuOg+Lt27cJDg7Gy8sLAA8PD5ydnVmyZAnTp08nMDCQyZMnM2/ePCBlcLy5uTkBAQFMmjSJsmXLEhQUhE6nIzQ0FBsbGwCcnZ21WWgeFx0dTWhoKC1atND29e3bl5YtW7Ju3ToAunTpQunSpXn99dc5duwY9evXZ+XKlVy4cIHjx4/j5uam1bVKlSoEBQWxePFio+Ns3bpVG4wfERHB559/TmhoKF26dAEgOTmZ7t27c+zYMRo2bJjutZFVMoQQonjL8e1TGxsbLSCmvvbx8eHgwYPs37+fu3fv4ufnl2ZligcPHvD7778DcPDgQTp06KAFRAAvLy/s7e3THK9s2bJGAfHu3bscPXqUPn36GKXr168fAPv27QPgxx9/xN3dXQuIkDIfaseOHbU0qTw8PIxmp3F1dcXExMToPF1dXQEyXflCVskQQojiLcctxXLlyqXZV6FCBU6ePMnNmzcBaNKkSbp5U4PE1atXqVmzZpr3y5cvn27Zj4uJiUEplWa/jY0Ner2e27dvAyktzCfTpJaXGpxTpbfahqWlpVGgzM78p7JKhhBCFG85Doo3btxIs+/vv//G0dFRa+mFhIQ
YPbNLVa1aNSBl1frr16+neT+9fTqd8WwStra26HS6NGljY2NJSEjQ6mBvb8/p06fTrWt6LVIhhBAix7dPY2Nj2bNnj9Hr3bt306JFC1q1akXp0qWJioqiWbNmabayZcsC8NxzzxEeHk5sbKxWzp49e7RWXmasrKxo1KiR1qEm1YYNGwBo27at9v9jx44ZBcbo6Gh2796tpRFCCCEel+OgaG9vz7Bhw1i9ejXbtm3D19cXpRTjxo3D1taWOXPmMHnyZKZMmUJoaChhYWEsX74cX19f7t+/D8C4ceNITk7G19eXbdu28eWXX/LKK69oQTMrs2bNIjIykpdeeolvv/2WDz/8kHHjxtG7d2/q168PwNChQ3FxcaFbt258/fXXbNmyhU6dOlGqVCnGjRuX09MWQghRAuT49qmjoyPvvfcekyZN4syZM9SrV49du3Zpz+8mTJhApUqV+OCDD1i6dClmZmbUqFGD559/Xnsu5+joSGhoKGPGjMHPz48aNWrwySefMGPGjGzVoUePHgQHBzNnzhx69uyJvb09w4cPZ/78+Voaa2trIiIiCAgIYPjw4SQlJdGmTRv27t2b7q3d/JA61/qdO3dynDcpKZcTgsfnckLwJJkQvEiRCcHzXjGYEDz1u0LWaSg8skpGPoqKiiqwACyEeHZcunSJypUrF3Y1SiQJivkoOTmZK1euYG1tnabDUFxcHM7Ozly6dCnHS8TkNq/kKxr5ilNdJV/BHlMpxZ07d3BycspwkhCRv3J3j05ki4mJSZZ/7ZUpUybX66blNq/kKxr5CuOYki9v8+XHMR8fvy0KnvwpIoQQQvxDgqIQQgjxDwmKhUSv1xMYGJirGXBym1fyFY18hXFMyZe3+QrrmCL/SUcbIYQQ4h/SUhRCCCH+IUFRCCGE+IcERSGEEOIfEhSFEEKIf0hQzAOnTp2iY8eOGAwGKlasyOTJk3n48GGW+apWrYpOp0uzPb5mY0RERLpp+vfvX2h1z6hOOp2OOnXqFEqdAG7dusWIESOoUqUKBoMBd3d3li9fnq2659X1LKr++usvRowYQaNGjShVqhTu7u7Zyrds2TKef/55ypUrh06nS7M6DeT/Nc1t3V966SVq1aqFwWDAzs6O9u3bExYWlid1epp6PS4oKAidTsfzzz9vtL+k/pwWBTKjzVOKjo7Gy8uLWrVqERISwuXLlwkICOD+/ft8/PHHWebv06cPEyZMMNqXXlftlStXGgUcBweHQqt7kyZNiIyMNNoXFxeHr68vvr6+hVInAD8/P06dOsW8efOoUqUKO3fu5I033sDU1JTXXnvNKG1+XM+i7Pjx4+zYsYMWLVqQnJxMcnJytvKtXr0agK5du2r/zkh+XdPc1v3hw4cEBARQq1Yt4uPjWbFiBV27diU8PJx27doVWr1SXbt2jdmzZ6e7uHqqkvZzWiQo8VTmzZunDAaDunXrlrbv008/Vaampury5cuZ5nVxcVGjRo3KNE14eLgC1OHDh/Okvo97mro/aeXKlQpQhw4dKpQ6Xb16VQFq5cqVRvvbt2+vvLy8tNf5eT2LsqSkJO3fQ4YMUfXq1ctRvnPnzilABQcHp0mT39c0t3V/UmJionJ2dlavvfZakajXyy+/rAYPHqw8PDxUt27djN4rqT+nRYHcPn1KoaGh+Pj4YG9vr+3r27cvycnJeXqrJj/kZd3XrVtHrVq1aN68eaHU6dGjR0DaeSNtbGyKxDI8oaGh2i2wx5dI69WrFzqdDoPBwB9//JFvx8/t5NJFYVLqvKqDqakptra22boVnx1PU699+/axZcsWFixYkCd1EXmn8H/ii7lTp06leY5ma2uLo6Mjp06dyjL/2rVr0ev1WFlZ0bVrV44dO5Zuuq5du2JqakrlypWZNGkSDx48KPS6p/r777/Zs2cPAwcOLLQ6OTs706lTJ+bNm8eJEye4c+cOGzZsICwsjFGjRqVJnx/XMzO+vr4MHz4cgIULF3Ls2DE2bNjAli1
bAHjvvfdwdXXN1zrkt4K+ptmhlCIxMZFbt26xaNEi/vzzT15//fVCrVNSUhJvvvkmM2bMwNHRMdO0RfGaPuvkmeJTio6OxtbWNs1+Ozs7bt++nWneHj160KJFC6pUqcLZs2eZO3cubdu25ciRI1SvXh1IaelMnjyZ9u3bY2lpyZ49e1i0aBEnT55k+/bthVb3x61fv56kpKQ8CYpPU6eQkBD69etHvXr1gJSWwdKlS+ndu7eWJj+vZ1YWL17M7t27OXv2LEOHDuXSpUsA+Pj4pBu4i4vCvKZZWbFihfY82crKivXr19OqVatCrdOyZcu4d+8e48ePzzBNUb6mzzoJioXoo48+0v7drl07OnXqRJ06dVi0aBHLli0DoHHjxjRu3FhL5+XlhaOjI2+++SaHDh3iueeeK/B6P2nt2rU0bdq0UFs6SimGDh3Kn3/+ybp163B0dOS7775j3Lhx2NnZab32CvN6WllZsXr1atq3b88vv/wCpHz5rVy5Ms16m8VJUf4ZfeGFF2jUqBE3b94kODiYvn37snnz5qfuEJZb169f5+2332b16tWYm5tnmK4oX9Nnndw+fUp2dnbExsam2R8dHW30XCw7HB0dadu2rfaFmZG+ffsCZJkuK3lR9zNnznDo0CEGDRr0VHV52jrt2LGD4OBgNm7cyIABA/D09GTu3LkMHjw4Te/eJ+XV9cyONm3a0KxZM+11jx49nskV1gvymmbGwcGBZs2a0aVLF1asWIGvry+TJk0qtPq8/fbbNGjQgHbt2hETE0NMTAyJiYkkJiZq/85IUbmmzzoJik+pTp06aZ51xcbGcvXq1TwZs5ef8qLu69atw8TEJM/GT+W2TidOnMDU1DTNWLHGjRtz5coV7t+/nyf1e1pffvklhw4d0l6vXbuW/fv3F2KNSpamTZvy119/FdrxT506xd69e7Gzs9O2n376iV27dmFnZ8fu3bsLrW4ihQTFp+Tr68vu3buJiYnR9gUHB2NiYkKnTp1yVNaVK1fYt29flj04v/76a4Cn7umZF3X/6quv8PT0zLLDQH7XycXFhaSkJH777Tej/b/88gvly5endOnSGebNq+uZlUuXLjF27FggpYXo5uZGcnIyQ4YMKTJBO68U1DXNqX379mnP6wtDUFAQ4eHhRlvDhg1p2bIl4eHhmd4WLarXNDO5neBAKcWCBQuoUqUKlpaWtGrVigMHDuRzbf89uHgKt2/fVo6OjsrDw0Pt2rVL/fe//1W2trZpxh96eXmpGjVqaK/XrVunBg4cqNasWaP27NmjvvjiC1WjRg1lZ2enzp49q6UbNGiQCgwMVFu3blW7du1SU6ZMUebm5uqFF14otLqn+r//+z8FqC+++OKp6/K0dYqLi1NVqlRRNWvWVP/73//U7t271eTJk5WJiYl65513tHT5eT0zk5ycrLy9vRWg7Ozs1NWrV1VkZKQyMTFRgBo5cmS+Hv/evXsqODhYBQcHK09PT+Xs7Ky9vn79ulIq/c/58OHDKjg4WC1btkwBasKECSo4OFhFRERoafL7muam7tu3b1d9+/ZVq1evVuHh4WrTpk2qd+/eClBfffVVodUrPemNUyysn9O8tmXLFlW5cmXVu3dvVb9+/WyP5Zw/f74yNzdXH3zwgdq9e7fq1auXsra2VmfOnMnnGislQTEPnDhxQnl7eytLS0tVvnx5NXHiRJWQkGCUxsPDQ7m4uGivIyMjlaenp3JwcFClSpVSDg4Oqm/fvurUqVNG+ebNm6fq1aunrKyslJmZmXJ1dVWzZs1KU35B1j3VxIkTlV6vV9HR0XlSl6et059//qn69u2rnJycVOnSpVW9evVUUFCQSkxM1NLk9/XMyNKlSxWgALVq1Spt/4QJExSgdDqdCgsLy7fjpw6+T28LDw9XSqV/TYcMGZJuHg8PDy1Nfl/T3NT95MmTqmfPnsrJyUmZm5srJycn1aVLF6NgXhj1Sk96QbGwfk7zWm4mOHjw4IEqU6a
MmjZtmrYvISFBubi4qDfeeCNf6vk4WWRYCCFEvvP39+fnn3/m999/zzTdnj178Pb25siRIzRq1EjbHxAQQEhICOfPn8/XesqQDCGEKMHi4+OzPcuPUirN8CG9Xp/ufM25ldrR7smOdW5ubly8eJEHDx5gaWmZZ8d7kgRFIYQooeLj46lgaUcc8VknJmWs7d27d432BQYGMmvWrDyrU3R0NHq9HgsLC6P9dnZ2KKWIjo6WoCiEECLvPXz4kDjimW3SDQvMMk0bzyMC7+7g0qVLlClTRtufl63EokCCohBClHCldXosdZkHRZN/1o8oU6aMUVDMa3Z2diQkJBAfH2/UWoyOjkan02FnZ5dvxwYZp5gjS5YsoUqVKpiamvLCCy/kSZlBQUHs3LkzT8oSQhQMf3//XC0qXFSZ6kyytRWE1GeJp0+fNtp/6tQpbdxifpKgmE1//vknEyZMYNCgQfz444+8//77eVKuBEUhRGEz0emytRWE1q1bU6ZMGYKDg7V9jx49IiQkhK5du+b78eX2aTadPn0apRSvvfZaoc6IkZX87pklhCj6cvo9kBL0Mm8jmZDzoHj//n3tj/4LFy4QFxfHxo0bAfDw8KBcuXJ4e3tz4cIFbfo9CwsLpk2bxqxZsyhXrhz169dn2bJl3Lp1i4kTJ+a4DjklLcVs8Pf3p3v37gDUqFEDnU7HJ598wptvvknt2rUpXbo0VatWZcSIEWkms962bRvNmjXDysoKW1tbmjVrpv2QVK1alQsXLvDJJ59oC9CuWrVKy7tq1SoaNGiAhYUFlSpVYsaMGSQlJRm9r9PpiIyMpGPHjhgMhkKd7FiIZ8nx48fp2rUrZcuWpXTp0tSuXTvTO0THjh2jc+fOGAwGbGxs6NOnDxcvXtTeHzZsGO3atdNe37x5ExMTE6Np2+7evYuZmZlRK+nkyZP07NkTGxsbDAYD3bp148yZM0bH1ul0LFiwgClTplCxYkXKly+fo3MtZWKarS2nrl+/jp+fH35+fkRERHDp0iXt9fHjx4GU9SWfnAh9ypQpBAYGsmjRIrp27UpUVBS7du0qkAaJtBSz4a233qJu3bpMmTKFkJAQHB0dqVGjBm+//TZz586lXLlyXLp0iblz5/LCCy8QHh4OpKwg0adPHwYMGMD8+fNJTk7m119/JTo6GoDNmzfTtWtX2rZtq63kUKNGDQA++OADJk+ezPjx41m8eDEnT57UguKTq3UPHDiQ4cOHM3369Ezn+BRCZF/37t2pUKECK1aswMbGhr/++ouoqKh00166dIn27dtTo0YN1qxZQ3x8PDNmzMDDw4PffvsNa2tr2rdvz9q1a7UOJHv37kWv13PkyBHu3LmDtbU1+/fvJzExkfbt2wNw9uxZWrdujbu7O6tWrcLExIS5c+fi7e3N6dOnjXp+fvjhh7Rs2ZIVK1ZkutpGekx0JtloKea8DVW1alWymh8mIiIizT6dTse0adOYNm1ajo/51PJ9zpxnxObNmxWgzp07l+77jx49Uvv27VOAOn36tFJKqeDgYAWouLi4DMt1cXFJM69nXFycsrKyMprmSCml/vOf/yhLS0t18+ZNpZRSK1euVIBasGDBU5yZEOJJN27cUIDatm1buu8/OWXZ+PHjlcFgULdu3dL2nTx5Uul0OvXRRx8ppZQ6e/asArSp5saOHasGDBigypYtq0JDQ5VSSs2YMUO5urpqZQwePFhVr15dPXjwQNt3/fp1ZWVlpT755BNtH6Dq1q2rkpOTc3SesbGxClCflX5ZrTEMy3T7rPTLClCxsbE5OkZxI7dPn8L//vc/GjdujJWVFWZmZrRt2xaAP/74A4AGDRpgamrKwIED+eabb9JdJzA9+/fv5+7du/j5+WlrrSUmJuLj48ODBw/STJPUrVu3vD0xIUq4smXL4uLiwrRp0/jyyy8zbCGm+vHHH/Hy8jJa87NOnTo0bNiQffv2AVCtWjUqV67M3r17Adi7dy+enp60a9eOH37
4QduX2koECAsLo0ePHpQqVUr7HrCzs6Nx48YcPnzYqA6+vr65Xqw6taWY1VYSlIyzzAebN29m8ODBPPfcc2zYsIEDBw6wefNmIGWWCABXV1e2b99ObGwsvXr1oly5cvTo0cPoOUN6bt68CUCTJk0wMzPTtlq1agEpt2oeV6FChbw+PSFKNJ1OR1hYGG5ubowaNQpnZ2eaNWumBbQnRUdHp/t7WKFCBW7fvq299vDwYO/evcTFxfHrr7/Svn172rdvz969e0lISODQoUNGQfHmzZsEBQUZfQ+YmZnx448/5un3QFHqfVrY5JliLgUHB9OoUSM+/fRTbV/qX3uP69KlC126dCEuLo5vv/2W8ePHM3ToUL7//vsMy079azMkJARnZ+c071erVs3odW7/OhRCZMzV1ZXg4GAePXrE/v37mT59Ot27d+fy5ctp0trb23P9+vU0+//++29cXV211+3btycgIICIiAgcHByoU6cO9+7dY8qUKYSHh5OQkGDUGcfe3p5u3boxcuTINGVbW1sbvX6a74H8eqZYHElQzKUHDx5gbm5utG/t2rUZpi9Tpgx9+/bl4MGDfPXVV9p+c3NzrWWZqlWrVpQuXZqoqCh69eqVtxUXQuSImZkZHh4eTJ06lR49enDlypU0adq2bctnn31GdHS0NuPK6dOn+e2333jllVe0dO3bt+fevXt88MEHWouwUaNGWFpasmDBApydnalataqW3sfHh99//53GjRtjaprz3p/ZVcrElFK6zMsvpfLv+EWJBMVc6tixI6NGjeKdd96hVatW7Ny5M03r79NPPyUyMpIuXbrg6OjIuXPnWLNmjdEK8m5ubuzZs4fvvvsOOzs7qlWrRtmyZZkzZw6TJ08mKioKT09PTE1NOXv2LFu3bmXTpk3Sy1SIfPTbb78xYcIE+vXrR40aNYiNjWX+/PlUrVpV6yH+uPHjx7Ny5Uo6derEjBkziI+PZ+bMmVSpUgV/f38tXZ06dShfvjw//PADH330EQCmpqa0adOG0NBQBg0aZFTu7Nmzad68OZ07d2b48OFUqFCBa9eu8cMPP9CuXTsGDBiQJ+ebnRlrTEtIS7FknGU+eP3115kwYQJLly7lxRdf5NKlS6xbt84oTYMGDbh58yYBAQF06tSJwMBABgwYwLJly7Q08+bNo3LlyvTu3ZvmzZvzzTffADBhwgRWrlxJeHg4vXv3xs/Pj88++4zmzZunaaEKIfJWxYoVqVixIvPnz8fX15fXX38dZ2dnwsLC0m2xOTs788MPP2BnZ8egQYMYPnw4DRs2JCIiIs1tztQW4uPPDj08PNLsA6hZsyaHDh2ibNmyjBw5ks6dOzN16lTu3btHgwYN8ux8UwfvZ76VjMc0ssiwKNK2bNnClStX0n2mklsxMTEEBQXRt29f6tatm6O82V0oVafTsXDhwgKZgSPVunXrCAwM5Pz589SrV4+jR4/mSbnnz59n1apVDB8+HCcnpzwpUxQNcXFx2NjYEGz3BqVNMl/t4n5yAn7R/yE2NjZfJwQvbNJSFEXali1bjFrWeSEmJobZs2dz4sSJPC33cZGRkWluheWnu3fv8sorr9C2bVsiIiL43//+l2dlnz9/ntmzZ6f7LE08G3TZGI6hKyFDMuSZongmKKV4+PBhkVnbrWXLlgV6vPPnz5OQkMDLL79MmzZtCvTYOZWQkICZmRkmJiXjS7Y4yFbv0xISFEvGWYoiK7P5Jf39/fnyyy85fvy4NjdsaqeF1KV7du7cScOGDdHr9drz2MjISLy8vLQ5KAcOHKh1lz9//rw2pMXPz08r9/z580DKF/bMmTOpXr06er2eypUrG3WUSBUREUHjxo0xGAw899xz/PLLL0bv63Q6Fi1apL329PTk+eefZ+PGjdSuXRsrKyu8vLzSzGEZFRXF888/T+nSpXF2dmbJkiWMGzfOqEfik2bNmkX9+vUB8Pb2RqfTaSuhT506lfr162NlZUWlSpUYMGAAV69
eTVPGjh07aNOmDaVLl8bOzg5PT0+OHDlCREQEHTp0AKB58+ba9Up14cIF+vTpo83L2blzZ44dO2ZUdtWqVXnzzTd5//33cXFxwdLS0mjsnih8Mk7xX9JSFIUqs/kl33rrLW7cuMGpU6e04S7lypXT8l65coUxY8ZovfyqVKlCZGQknp6edO3alfXr13Pv3j1mzpxJz549iYyMxNHRkZCQEF588UXmzZunfeE7OjoC0Lt3b/bs2cP06dNp2bIlN27cICQkxKjO165dY8yYMUydOhUbGxumTZtGr169OHPmDGZmGS/UevToURYuXMiCBQtISkoiICCAl156icjISCCltduzZ0/+/vtvPv30U2xsbFi4cCEXLlzItFX16quvUqNGDQYPHswnn3xCkyZNqFy5MpAyIfP06dNxcnLixo0bLF68GA8PD06cOEGpUim//uvXr2fAgAH07NmTdevWYW5uzk8//cTly5dp3749n3zyCaNGjWLlypXaWncAd+7cwdPTExMTE5YvX46FhQVz586lffv2/Pbbb0ZjbDdt2kStWrX48MMPMTU1xWAwZPGTIQqSmUkpzEwyDwdmJGX6/jOjcGeZEyVZVvNLKpV2jsnH9wPqwIEDRvvbt2+vWrdubTQH5PHjx5VOp1M7duxQSil17tw5Bajg4GCjvGFhYQpQ69aty7Q+Op1O/f7779q+8PBwBagff/xR2weohQsXaq89PDyUwWBQ169f1/alzl176dIlpZRSO3bsUIDau3evlubOnTvKxsZGubi4ZFgnpZQ6cuSIAlR4eHiGaRITE1VUVJQC1K5du5RSSiUnJ6vKlSurzp07Z5gv9fwOHz5stP/DDz9UOp1OnThxQtt369YtZTAYVEBAgLbPxcVFlS1bVt29ezfTcxAFL3Xu09AKE9Rex+mZbqEVJsjcp0Lkp5zOL5le/hYtWmiv79+/z08//YSfn5+2HE1iYiKurq44OzunmSvySd9//z2lS5emf//+maZzcnKiXr162uvUHqxZ1b9Ro0ZGLd0n8x0+fBhbW1ujGU2srKzw9vbOtNzMhIaG0rp1a2xsbChVqpTWgkydn/f06dNERUUZDTDPrh9//BF3d3fc3Ny0ffb29nTs2FGb7zOVp6entA6LMJn79F8l4yxFkZTT+SWf9ORcj9HR0SQlJTF+/Pg0c0VevHgxzVyRT7p16xaOjo5ZTpdla2tr9Dp13OiTMxPlNN/Vq1eNgmaqnK6Nl+rw4cP06NEDJycn/ve//xEZGcmBAweMjnnr1i2AXA21yO58n6n7RNGlMzXJ1lYSyDNFUagym1/Sysoq07xPBi9bW1t0Oh3Tp0/nhRdeSJPewcEh0/LKli3L1atXUUoVynyyjo6O3LhxI83+9ObUzI7NmzdjY2PDhg0btGeSFy5cMEpTtmxZgFwNt7C3t+f06dNp9v/9999Gq0WAzM9b1OlMdeiy6A1cUj7DkhH6RZH3+PyScXFx2pd0enPDZsRgMNCqVStOnjxJs2bN0mypPTgzatn5+Phw//59NmzYkHcnlgPNmzcnJibGqKV89+7dTCePz8yDBw8wMzMz+jJ7cn7e2rVrU7lyZVauXJlhORldr7Zt23Ls2DGjwBgdHc3u3bu1ZdRE8aArZZKtrSSQlqIoNNmZX9LNzY3//ve/fPXVV9SqVQsHB4dMhycsXLgQLy8v+vXrR//+/bGzsyMqKorvvvuOoUOH4unpScWKFbG1teWrr76iWrVq6PV6GjRogI+PD127duWVV17hzJkztGjRgtu3b7Nx40bWr1+f79fD19eXJk2aMHDgQObPn4+trS3vv/8+1tbWuRrT17FjR4KCghg9ejS9evUiMjIyzaD+1KEjAwYMoHfv3gwePBi9Xk9kZCTNmzfn+eefx9XVFVNTU/773/9SqlQpSpUqRbNmzRg6dChLliyhW7duvPvuu1rv01KlSjFu3Lg8uiqiIOjMS2Fimnk40CWVjHBRMkK/KJKyM7/ksGH
D8PPzY/To0TRv3lwbf5eR1q1bs2/fPu7evcvQoUPp2rUrc+bMoXTp0tSsWRMAExMTVq5cyblz5/D29qZ58+Zay3TTpk2MGTOGTz/9FF9fXwICArK8jZtXdDodW7dupWHDhgwfPpzXX3+dbt264ePjg42NTY7L69q1K++99x5bt26lR48e7N27l+3bt6dJ169fP7Zu3crly5fp378/AwYMYN++fVqnHAcHBz755BNtEurmzZsDKUsXRUREaPUdNGgQdnZ27N27N90lz0TRJS3Ff8ncp0IUYQ8fPqRu3bq0a9cu01ucQuRG6tyn4Q1nY2VqkWnau0nxdPg18Jmf+7RktIeFKCY+++wzkpOTqV27NtHR0fznP//h/PnzfP3114VdNfEM05XKunepzH0qhChwFhYWLFiwQJt2rmHDhuzYsYNmzZoVbsXEM01nmvXtUV0JedomQVGIImTw4MEMHjy4sKshSpjsjEPUlZC5XiQoCiFECWeiN8WkVObhwCSdxZWfRRIUhRCihMtO71K5fSqEEKJEyNYzRbl9KoQQoiTI1jPFZAmKQgghSoBs3T6VlqIQQoiSQILivyQoCiFECWdiXgoT8yx6n+pKRrgoGWcphBAiQzpTHTrTzJeGyur9Z4UERSGEKOF0pXRZ3z5NlqAohBCiBMjWM0XpfSqEEKJEyMaQDLJ6/xkhQVEIIUq4bA3eT5KgKIQQogTI1uB9aSkKIYQoCbI1JEOVjHBRMs5SCCFExkx1KVtWaUoACYpCCFHC6UxM0Jlkcfs0i/efFRIUhRCihNOZZOOZogRFIYQQJYHORJeNlqLcPhVCCFESyDNFTcloDwshhMiQiVmpbG05derUKTp27IjBYKBixYpMnjyZhw8fZpmvatWq6HS6NFt8fHxuTi9HJCiKZ1rVqlV58803tddbtmxh2bJlOS7n/Pnz6HQ6Nm7cmJfVA8Df3x93d/cs0+l0OhYtWmS0b/LkyTg6OmJiYsK4ceMyPL/sHkOUTKkdbbLaciI6OhovLy8ePnxISEgI8+bN47PPPiMgICBb+fv06UNkZKTRptfrc3N6OSK3T8UzbfPmzdjZ2Wmvt2zZws8//8zIkSMLsVa5ExkZiYuLi/Z69+7dLFy4kCVLltCiRQucnJwIDAxM9/zeeust7t27V9BVFsVFPtw+Xb58OXFxcWzevBl7e3sAEhMTGTlyJNOnT8fJySnT/BUqVKBly5Y5OmZekKAonmmNGzcu7CrkmSe/IE6dOgXAmDFjMMnir/gaNWrkW71E8afTZWNIhi5nLcXQ0FB8fHy0gAjQt29fRowYQVhYGP7+/rmpar6T26eiWEq9HRgaGoq7uzsWFhY0bdqUAwcOGKV7/Papv78/X375JcePH9eeUTz+ixkZGUmnTp0oU6YM1tbWtGjRgu+++86ovPj4eN58803s7OxwdHRk4sSJJCYmZlrX48eP07VrV8qWLUvp0qWpXbs277//fpp0ERERNG7cGIPBwHPPPccvv/xi9P7jt089PT0ZPXo0AKampuh0OqpWrZrh+T15+3TVqlXodDqOHDmCr68vBoOBWrVqsXr1aqNjKqWYM2cOFStWxMrKCj8/P3bv3o1OpyMiIiLT8xbFR+o0b1ltAHFxcUZbQkJCumWeOnWKOnXqGO2ztbXF0dFR+4MuM2vXrkWv12NlZUXXrl05duzY059oNkhQFMXW1atXGTlyJJMmTWLDhg3o9Xo6d+7M9evX003/1ltv0bVrV6pXr649o3jrrbcA+Omnn/D09CQhIYEvvviCTZs20bNnTy5evGhUxowZMzAxMWHDhg2MGDGCxYsX88UXX2Raz+7duxMdHc2KFSvYsWMHEydOTHMr89q1a4wZM0Y7l/j4eHr16sWjR4/SLXPZsmWMGzcOQDuXrVu3Znh+GRk0aBCdOnViy5YtNG7cGH9/f06ePKm9v3TpUmbNmoW/vz8hISHUqFGDV199NdMyRfGTMiQj6w3A2dkZGxsbbZs
/f366ZUZHR2Nra5tmv52dHbdv3860Pj169ODjjz9m9+7dfPLJJ/z111+0bduWs2fPPvW5ZkVun4pi6/bt2wQHB+Pl5QWAh4cHzs7OLFmyJN1f1Bo1alCuXDkuXLiQ5lbk5MmTqVmzJnv27MHU1BSATp06pSmjRYsWfPTRRwB07NiR8PBwNm7cyIgRI9Kt482bNzl37hwffvgh3bt3B6BDhw7pnssPP/xAvXr1ADAYDHTo0IGDBw/Stm3bNOnr1q2rPV98/FwyOr+MvPnmm9rzx9atW7Njxw42bdrEzJkzSUpKYsGCBQwdOpQFCxZo1+TmzZusWLEiW+WL4kFnVgpdFr1LdYkp71+6dIkyZcpo+/Oj80vq7xhAu3bt6NSpE3Xq1GHRokW56iiXE9JSFMWWjY2NFhBTX/v4+HDw4MEclXP//n0OHDjAkCFDtICYkScDZd26dYmKisowfdmyZXFxcWHatGl8+eWXGaZ1cnLSAmJquUCmZeeFx8/HYDDg4uKiHTMqKoqrV6/So0cPozw9e/bM1zqJgqcz1WXj9mlKS7FMmTJGW0ZB0c7OjtjY2DT7o6OjjZ4zZoejoyNt27ZN80ghP0hQFMVWuXLl0uyrUKECV69ezVE50dHRJCcnZ9kbDkhzO8jc3DzTsVM6nY6wsDDc3NwYNWoUzs7ONGvWjL1792ZZLpDv47IyO5/U6/jkdS5fvny+1kkUvJzcPs2uOnXqpHl2GBsby9WrV9M8ayxKJCiKYuvGjRtp9v399984OjrmqBxbW1tMTEy4cuVKXlXNiKurK8HBwURHRxMREYFer6d79+7cvXs3X46XV1Kv45PXOaNntqIYMzHJ3pYDvr6+7N69m5iYGG1fcHAwJiYm6T6ayMyVK1fYt28fzZs3z1G+3JCgKIqt2NhY9uzZY/R69+7dtGjRIsM86bXsDAYDrVq1YvXq1SQlJeVbfc3MzPDw8GDq1KnExcXlSxDOquWaE5UrV6ZixYps3brVaP+WLVvypHxRdKROCJ7plsOgOGLECKytrXnhhRcICwtj5cqVTJo0iREjRhjdlfH29qZmzZra66+++opBgwaxdu1awsPDWbFiBe3bt8fU1JQJEybk2TlnRDraiGLL3t6eYcOGMXv2bGxtbVmwYAFKKa1XZnrc3Nz473//y1dffUWtWrVwcHCgatWqLFiwAC8vL3x8fBg5ciR2dnb83//9Hw4ODrzyyiu5ruNvv/3GhAkT6NevHzVq1CA2Npb58+dTtWrVfBk7mNH55YapqSnTpk1j3LhxVKhQgQ4dOhAeHs7u3bsBshwbKYqR7MxYk8PP287Oju+//57Ro0fzwgsvYG1tzauvvsrcuXON0iUlJRkNa6pWrRpXrlxh3LhxxMTEYGtri5eXF3PmzKFatWo5qkNuSFAUxZajoyPvvfcekyZN4syZM9SrV49du3ZRoUKFDPMMGzaMQ4cOMXr0aG7dusWQIUNYtWoVbdu2JSIigpkzZ+Lv74+pqSn16tXj3Xfffao6VqxYkYoVKzJ//nwuX76MjY0N7dq1Y82aNVl26smNjM4vt0aPHk10dDTLli3jo48+wsfHh4ULF9KvXz9sbGzyruKiUOnMTNGZZf7zmNX76XFzc9P+iMrIk+NdW7ZsSXh4eI6PlVd0SilVaEcXIpf8/f35+eef+f333wu7KiXOW2+9xeLFi7l16xaWlpaFXR3xFOLi4rCxseHCukOUKW2Vedr7d3EZ+ByxsbFGQzKeNdJSFEJk6OTJk6xZs4bWrVtjbm5OREQEixYt4o033pCA+AzJTu9SWU9RCFHilS5dmsjISP7zn/9w584dKlWqxKRJk5g1a1ZhV03kocenccssTUkgt0+FEKKESr19emnj/1HGkMXt03t3ce7TRG6fCiGEeLbpdCkTTWSVpiSQoCiEECWdiS5lyypNCSBBMR8lJydz5coVrK2ts/wrTAghlFLcuXMHJyenAh0HKs8U/yVBMR9duXIFZ2fnwq6
GEKKYuXTpEpUrVy6w40nv039JUMxH1tbWAJw7elL7d3Ykm+f8L7JStx/mKP2jChY5PsazpKB6l5WMr5G8ZXYj/UVrM5WbRszD5Bwlv/DpDzk+hPNEnxylv3PnDtUau+Xo+yJPmJD1NSwZDcWnC4oygDpzqbdMra2tKWOd/d5ayfpcBMVHOQyKZSQoFgQJijlnFl80g6K1vnSOD5GT3/vHFfTjFp1Ol42ONiXjp/mpguJbb72VZgVxIYQQxYx0tNE8VVDMjwmN0/PgwQOZPUMIIfKLBEXNU90l9vf3x93dHYBVq1ah0+k4cuQIvr6+GAwGatWqxerVq9Pk27FjB23atKF06dLY2dnh6enJkSNHgJTJYXU6HTt27KBPnz6UKVMGPz8/AGJiYhg5ciSOjo7o9XqaNm1KWFhYmrI7duxI+fLlKVOmDC1atODbb781ShMTE8Nrr71GpUqVsLCwwNnZmf79+xuliYqK4qWXXsLBwQFLS0vat29fIKs+CyFEQdOZ6NCZZrFJUMydQYMG0alTJ7Zs2ULjxo3x9/fn5MmT2vvr16+ne/fulC9fnnXr1rF27VratGnD5cuXjcoZPnw4NWrUYPPmzUycOJGHDx/SsWNHtm/fzty5c9m2bRt169alW7duHDt2TMt37tw5unfvzv/+9z82bdpEmzZt6Nq1q9FM7AEBAWzfvp158+axa9cuFi5ciF6v196Pjo6mbdu2HD16lKVLl7Jp0yYMBgNeXl6ZLrCakJBAXFyc0SaEEEVdau/TrLaSIM97n7755puMHDkSgNatW7Njxw42bdrEzJkzUUoxceJEOnXqxObNm7U8Xbt2TVNOjx49eO+997TXK1eu5OjRo/z666/UrVsXgM6dO/Pnn3/yzjvvsGHDBu34qZKTk+nQoQPHjx/ns88+w9PTE4BDhw4xcOBAhgwZoqV9vKUYFBRETEwMhw4donz58kDKQpiurq4sWrSI999/P91znz9/PrNnz87R9RJCiEKXMqVN1mlKgDxvKXbq1En7t8FgwMXFhaioKABOnz5NVFRUthZt7datm9HrsLAw6tevj6urK4mJidrWsWNHDh8+rKWLiopiyJAhVKpUiVKlSmFmZkZYWBh//PGHlqZJkyasWrWKRYsWpdtzNiwsjA4dOmBvb68dx9TUFA8PD6NjPWnatGnExsZq26VLl7I8TyGEKGzSUvxXnrcUbW1tjV6bm5sTHx8PwK1btwBwcnLKspwnF4q9efMmR44cwczMLE3a1MVak5OT6dGjB7GxscyZM4eaNWtiMBh4++23uXjxopZ+6dKl2Nvbs3jxYiZNmoSzszPTpk3jjTfe0I514MCBdI+VWecivV5vdBtWCCGKBelooynQwftly5YFUmZ6ycqTY2Ls7e1p0KABK1asyDDPX3/9xZEjR9iyZQs9e/bU9j948MAonY2NDUFBQQQFBXHs2DE+/PBDRo4cibu7O+3atcPe3p4uXbrwzjvvpDmGBD0hxLNGxin+q0CDYu3atalcuTIrV66kb9++Ocrr4+PDzp07cXJyyrClmRr8zM3NtX0XLlzgp59+wtXVNd089evXZ8mSJaxYsYKTJ0/Srl07fHx8WLNmDW5ubhgMhhzVUwghipvUHqZZpSkJCjQo6nQ6Fi1axIABA+jduzeDBw9Gr9cTGRlJ8+bNef755zPMO3jwYD799FM8PT2ZOHEirq6uxMTEcOTIER4+fMj8+fOpU6cOlStXZurUqSQlJXH37l0CAwOpVKmSUVlt2rShV69euLu7Y2pqyurVqzE3N6ddu3ZASu/UtWvX4uHhwdixY6lSpQo3btzg4MGDODk5MX78+Hy9TkIIUaDk9qmmwOc+7devH6VLl2bu3Ln0798fCwsLmjRpQq9evTLNp9fr2bNnD7NmzWLu3LlcvXoVBwcHGjdurPV21ev1hISEMGrUKPz8/HB2dmbmzJns2bOHn3/+WSurTZs2rF69mnPnzmFiYkL9+vX55ptvcHN
zA1Ju8x44cICZM2cyZcoUbt26Rfny5WnZsmWW9RRCiOJGJgT/l04pVVDTQJY4qata/33tWo5WqpYPJP/l5tc7N59LQR2npCuq1zlh94UcpY+7fxeX/s0LbHX71O+oW7+co4xV5pOQx929Q9mm1QqsboVFVskQQogSTqfLuiVYQvrZFN/FQB6fYi6/xcTEMGvWLE6cOFEgxxNCiAJlks2tBCghp/l0YmJimD17tgRFIcQzSWdqkq2tJJDbp0IIUdLpyPrBrNw+LR4iIiJo3LgxBoOB5557zmglC6UUixYtwtXVFb1eT/Xq1VmyZIlR/lOnTtG/f3+cnZ0pXbo0devWZfHixSQnpyxAev78eapVqwaAn5+fNsj1/PnzBXaOQgiRr1LnPs1qKwGKdUvx2rVrjBkzhqlTp2JjY8O0adPo1asXZ86cwczMjLFjx/LFF18wY8YMWrRowf79+5kyZQqWlpaMGDECgMuXL1O7dm0GDRqEtbU1R48eJTAwUBvj6OjoSEhICC+++CLz5s2jQ4cOADg6OqapT0JCAgkJ/64aLqtkCCGKA6VL2bJKUxIU66B4+/ZtfvjhB+rVqwekTEDeoUMHDh48iKOjIx9//DHLly9n+PDhQMqsOPfv32f27NkMHz4cExMTvL298fb2BlJalm3btuX+/ft8/PHHBAYGotfrady4MQC1atWiZcuWGdZHVskQQhRLcvtUU6xvnzo5OWkBEdCWlIqKimL37t0A9O7d22hVDR8fH65du6atYBEfH09gYCA1a9ZEr9djZmbGjBkzuHr1Knfv3s1RfWSVDCFEsSS3TzXFuqWY3oockBLobt68iVIKBweHdPNeunQJFxcXpkyZwueff05gYCBNmzbF1taWrVu38u677xIfH4+VlVW26yOrZAghRPFWrINiZuzt7dHpdOzbt89ogvBUtWvXBiA4OJjXX3+dKVOmaO/t2LGjwOophBCFTZnoUFkM3s/q/WfFMxsUU58T3rp1i+7du2eY7sGDB0ZBMykpia+//toozeMtUCGEeObIM0XNMxsUXV1dGTVqFC+//DKTJk2iRYsWPHr0iD/++IPw8HC2bNkCQMeOHfn888+pW7cuDg4OLFu2zKgHKUDFihWxtbXlq6++olq1auj1eho0aJBuC1QIIYodCYqaYt3RJisfffQR7777Ll9//TXdunXjpZdeYv369Xh4eGhpli5dioeHB6NHj2bYsGHUr1+f6dOnG5VjYmLCypUrOXfuHN7e3jRv3jxbCyULIURxkDokI6utJJBVMvJR6gz0e1vNx6qURbbzVRraOsfHKtOvfo7SmzxMzvExkvW5+BsqKRc/XgWxmGlBLXlRQAril7igVqIwScx5rtiNv+c4z9EpG3KU/vTtizk+RmJyUo7Sx6tHTOebAl8l4+bZy5Sxzvx4cXficKheSVbJEEII8YyT26eaHP3pn98rUzRq1Ah/f/98K18IIURaSqfTeqBmuJWQcYrP9DNFIYQQIieKVVBUSqXpGSqEEOIp6bK5lQC5CoqhoaG4u7tjYWFB06ZNOXDggNH7q1atokGDBlhYWFCpUiVmzJhBUpLxA+f9+/fTtGlTLCwscHd3JzQ0NM1xUm/X7ty5k4YNG6LX6/nmm28ACAkJoVGjRlhYWODk5ERAQECacYQXLlygT58+2NjYYDAY6Ny5M8eOHTNKU7VqVd58802CgoJwdnbG2toaf39/EhISOHr0KG3atNFW4HgyrxBCPBNkmjdNjjvaXL16lZEjRzJr1izs7OxYsGABnTt35s8//6R8+fJ88MEHTJ48mfHjx7N48WJOnjypBcUFCxYAKatbdO7cmfr167Nhwwaio6N54403uHfvHo0aNTI63pUrVxgzZgwzZ86kSpUqVKlShW3bttGnTx/69+/PggULOHXqFNOnT+fixYts3LgRgDt37uDp6YmJiQnLly/HwsKCuXPn0r59e3777TecnZ21Y2zduhV3d3c+/fRTzp49S0B
AAObm5kRGRhIQEECFChWYMmUKfn5+nDhxAhOT9P+WkFUyhBDFknS00eQ4KN6+fZvg4GC8vLwA8PDwwNnZmSVLljB9+nQCAwOZPHky8+bNA1IGx5ubmxMQEMCkSZMoW7YsQUFB6HQ6QkNDsbGxAcDZ2VmbheZx0dHRhIaG0qJFC21f3759admyJevWrQOgS5culC5dmtdff51jx45Rv359Vq5cyYULFzh+/Dhubm5aXatUqUJQUBCLFy82Os7WrVu1wfgRERF8/vnnhIaG0qVLFwCSk5Pp3r07x44do2HDhuleG1klQwhRHOXX0lGnTp1i9OjR7N+/H2trawYPHsy7776b5cQnSinee+89li1bxo0bN2jUqBFLlizJdJWivJLj26c2NjZaQEx97ePjw8GDB9m/fz93797Fz88vzcoUDx484PffU8YSHTx4kA4dOmgBEcDLywt7e/s0xytbtqxRQLx79y5Hjx6lT58+Run69esHwL59+wD48ccfcXd31wIipMyH2rFjRy1NKg8PD6MPydXVFRMTE6PzdHV1Bch05QtZJUMIUSyZ6LK35UB0dDReXl48fPiQkJAQ5s2bx2effUZAQECWed977z0CAwMZP34827dvx9HRkU6dOnH27NncnmG25bilWK5cuTT7KlSowMmTJ7l58yYATZo0STdvapC4evUqNWvWTPN++fLl0y37cTExMSil0uy3sbFBr9dz+/ZtIOUDeTJNanmpwTlVeqttWFpaGgXK7Mx/KqtkCCGKpXy4fbp8+XLi4uLYvHmz1uBJTExk5MiRTJ8+HScnp3TzxcfHM3/+fCZMmMD48eMBaNeuHa6urixatIhly5blrCI5lOOgeOPGjTT7/v77bxwdHbUTDwkJMXpml6patWpAyqr1169fT/N+evt0TzzctbW1RafTpUkbGxtLQkKCVgd7e3tOnz6dbl3Ta5EKIURJpXRZj0PM6TjF0NBQfHx8jL5v+/bty4gRIwgLC8twTPr+/fuJi4ujb9++2j5zc3NefPFFQkJCclSH3Mjx7dPY2Fj27Nlj9Hr37t20aNGCVq1aUbp0aaKiomjWrFmarWzZsgA899xzhIeHExsbq5WzZ88erZWXGSsrKxo1aqR1qEm1YUPKlE1t27bV/n/s2DGjwBgdHc3u3bu1NEIIIcjRkIy4uDijLaNhcqdOnaJOnTpG+2xtbXF0dOTUqVMZViX1vSfzurm5cfHiRR48eJDz88uBHAdFe3t7hg0bxurVq9m2bRu+vr4opRg3bhy2trbMmTOHyZMnM2XKFEJDQwkLC2P58uX4+vpy//59AMaNG0dycjK+vr5s27aNL7/8kldeeUULmlmZNWsWkZGRvPTSS3z77bd8+OGHjBs3jt69e1O/fsocoEOHDsXFxYVu3brx9ddfs2XLFjp16kSpUqUYN25cTk9bCCGeYSqbW0qnSBsbG22bP39+uiVGR0eneTQFYGdnl2kDKDo6Gr1ej4WF8XzRdnZ2KKWIjo7O8dnlRI5vnzo6OvLee+8xadIkzpw5Q7169di1a5f2/G7ChAlUqlSJDz74gKVLl2JmZkaNGjV4/vnntedyjo6OhIaGMmbMGPz8/KhRowaffPIJM2bMyFYdevToQXBwMHPmzKFnz57Y29szfPhwow/H2tqaiIgIAgICGD58OElJSbRp04a9e/eme2s3P6TOtX4vMWfrMN55cDfnB8vh8A+ZEDwXeYpwl/SSPiF4bn5n7iXnbCKQB+pRjo+RpHI4ITgpxyjodRrUP/9llQZS+oY8PiH4M9ePQol8c+nSpez++SWbbLLJpm2XLl0qkO+o2NhYBairf19W9x7cyXS7+vdlBajY2NhslV2uXDk1derUNPudnJzUlClTMsz3ySefKEA9ePDAaP9nn32mdDqdun//fs5OModklYx85OTkxKVLl7C2tk7TYSguLg5nZ+c0f3VlJqd5CuIYBZWnqNaroPIU1XrlJk9RrVdB5cksvVKKO3f
uZNgzM78oklBk3qrN6v0n1alTJ82zw9jYWK5evZrmeeGT+QBOnz5tNCb81KlTVKlSBUtLyxzVI6ckKOYjExMTKleunGmaMmXK5HhtspzmKYhjFFSeolqvgspTVOuVmzxFtV4FlSej9I+P3y4oObl9ml2+vr7MmzePmJgY7dlicHAwJiYmdOrUKcN8rVu3pkyZMgQHB2tB8dGjR4SEhNC1a9cc1SE3itWE4EIIIfKBSs7elgMjRozA2tqaF154gbCwMFauXMmkSZMYMWKEUUvY29vbaNy6hYUF06ZNY9GiRXz44Yfs2bOHAQMGcOvWLSZOnJhnp5wRaSkKIUQJl/IwM6uWYs7Y2dnx/fffM3r0aF544QWsra159dVXmTt3rlG6pKQkEhMTjfZNmTIFpRSLFi3SpnnbtWsX1atXz2Etck6CYiHR6/UEBgbmqOdWTvMUxDEKKk9RrVdB5Smq9cpNnqJar4LKk5tj5DdFMorMW4JZvZ8eNzc3du/enWmaiIiINPt0Oh3Tpk1j2rRpOT7m09IpVcB9f4UQQhQJcXFx2NjYcOnan5QpY51F2js4V6xFbGxsjp+3FifSUhRCiBIumSSSs+hdmtX7zwoJikIIUeKlDpHMKs2zT4KiEEKUcPn1TLE4kiEZ+ahq1arodLo0W2bLT6U6deoUHTt2xGAwULFiRSZPnszDhw8LoNbpe+mll6hVqxYGgwE7Ozvat29PWFhYtvJeuXKF3r17Y21tjb29Pa+++ipxOZyWLi8tW7aM559/nnLlyqHT6dJMLp+ZovK5/PXXX4wYMYJGjRpRqlQp3N3ds5VPKcWCBQu0QdCtWrXiwIED+VzbjO3cuRMPDw/KlSuHXq+nevXqBAQEGC0WkJEVK1bg6uqKhYUFDRs2ZPv27QVQ44ytWrUq3d/3qVOnZpqvKHwmKpv/lQTSUsxnffr0YcKECUb7sup1lro4Z61atQgJCeHy5csEBARw//59Pv744/ysboYePnxIQEAAtWrVIj4+nhUrVtC1a1fCw8Np165dhvkePXpE586dAVi3bh33799n4sSJDBw4sNC+xFavXg1A165dtX9nR1H6XI4fP86OHTto0aIFycnJJCdn76/41MVbFyxYQIMGDfjkk0/o1KkTR48eLZDu7k+6ffs2LVq0YMyYMZQtW5bff/+dWbNm8fvvv2f6R9fXX3/Na6+9xowZM/Dy8mL9+vX06tWLH3/8sUBWZ8/Mt99+azQAv1KlSpmmLxqfSfI/W1ZpSoB8nUSuhHNxcVGjRo3Kcb558+Ypg8Ggbt26pe379NNPlampqbp8+XJeVjHXEhMTlbOzs3rttdcyTbdu3Tql0+nUqVOntH27du1SgDp48GB+VzNdSUlJSimlzp07pwAVHBycrXxF6XNJPQellBoyZIiqV69elnkePHigypQpo6ZNm6btS0hIUC4uLuqNN97Il3rmxmeffaaATK+pq6urGjBggNG+Vq1aKV9f3/yuXoZWrlypAHXjxo1s5ynszyR17tNz135VN++fzXQ7d+3XHM19WlzJ7dMiKKPFOZOTk7N9yzK/mZqaYmtrm+Wtw9DQUBo0aEDt2rW1fR07dsTe3p6dO3fmdzXTZWKSux/7p/lcQkNDtdtpj68G06tXL3Q6HQaDgT/++CPbdcnNOWS2eGthfRbpSV1CLqOfrbNnz/LHH38YnQdA//79+f777zNc368oKiqfSercp1ltJYEExXy2du1a9Ho9VlZWdO3alWPHjmWZJ7eLc+Y3pRSJiYncunWLRYsW8eeff/L6669nmie9c9HpdOlOFlzUPc3n4uvry/DhwwFYuHAhx44dY8OGDWzZsgVIuYXm6uqaL/VOVdiLt2YmKSmJ+Ph4/u///o85c+bQo0cPqlatmm7azM7j4cOHnDt3Lr+rm6l69ephampK9erVmT9/PklJGQeTovKZKKVQKjmLTZ4piqfUo0cPWrRoQZUqVTh79ixz586lbdu2HDlyJNNnBbl
dnDO/rVixgtdeew0AKysr1q9fT6tWrTLNU1TPJTee9lwWL17M7t27OXv2LEOHDuXSpUsA+Pj4MGrUqLyubhrZWbw1v1cgyIiLiwuXL18GoEuXLqxbty7DtKmLzD75WdjZ2QEU2s+Vo6Mjs2fPpkWLFuh0OrZt28bMmTO5fPlyhs+ci8pnkh8TghdXEhTzgFLK6K9BnU6HqakpH330kbavXbt2dOrUiTp16rBo0SKWLVtWGFXNUkbnAvDCCy/QqFEjbt68SXBwMH379mXz5s34+voWVnUzldm5FAYrKytWr15N+/bt+eWXX4CUFRFWrlyZZmmxkmbnzp3cu3eP48eP8+6779K9e3e+++67Qv28cqpz585apzKATp06YWlpyZIlS5gxYwaOjo6FWLusSEebVHL7NA/88MMPmJmZaZu3t3e66RwdHWnbtq32hZgROzu7dLukR0dHGz3Pyg+ZnYuDgwPNmjWjS5curFixAl9fXyZNmpRpeUX1XHIjL86lTZs2NGvWTHvdo0ePLJcXyyt2dnYkJCSkGRIUHR2NTqfTWlqFoUGDBrRq1YpXX32VrVu3Eh4ezubNm9NNm1rPJz+L1BZkfv9c5UTfvn1JSkri6NGj6b5fVD4TGZLxL2kp5oGmTZty+PBh7bW1deZzCGYlt4tz5oWcnEvTpk0JDQ3NtLw6deqkeY6qlOL06dN07Njx6SqbhaL4uXz55ZccOnRIe7127VpGjBhB69atn6pu2VHYi7dmV4MGDTAzM+Ovv/5K9/3U8zh16pRRB65Tp05hbm5eKENLcquofCYyeP9f0lLMA9bW1jRr1kzbHv9FfdyVK1fYt28fzZs3z7Q8X19fdu/eTUxMjLYvO4tz5oXsngvAvn37svwC8vX15ddff+XPP//U9n3//ffcunUr3xcMzcm5ZMfTfi6XLl1i7NixQEoL0c3NjeTkZIYMGcL9+/efqm7Z8fjirakKcvHW7Dp48CCPHj3K8GerevXquLq6Gp0HwPr16/H29sbc3LwgqpktX3/9NaampjRu3Djd94vKZyK9Tx9TaINBnnHr1q1TAwcOVGvWrFF79uxRX3zxhapRo4ays7NTZ8+e1dJFREQoU1NT9eWXX2r7bt++rRwdHZWHh4fatWuX+u9//6tsbW1zNeYxL2zfvl317dtXrV69WoWHh6tNmzap3r17K0B99dVXWrrz588rU1NTNXv2bG3fw4cPlbu7u6pfv7765ptv1Pr165Wzs7Pq1q1bYZyKUkqpw4cPq+DgYLVs2TIFqAkTJqjg4GAVERGhpcnrzyU5OVl5e3srQNnZ2amrV6+qyMhIZWJiogA1cuTIHJ3DvXv3VHBwsAoODlaenp7K2dlZe339+nWllFJeXl6qRo0aRvnmz5+v9Hq9CgoKUt9//73q3bu3sra2VmfOnMnR8fNKr1691Ny5c9U333yjdu/erRYvXqwqVqyoGjRooBISEpRSSr3yyivK1NTUKF/q+Ne3335bhYeHqxEjRqhSpUqp/fv3F8ZpKKWU6tSpk1qwYIHasWOH2rFjh3r99deVTqdT48aN09IUtc8kdZziySs/qKi7v2S6nbzyQ4kYpyhBMZ9ERkYqT09P5eDgoEqVKqUcHBxU3759jQaxK6VUeHi4AtTKlSuN9p84cUJ5e3srS0tLVb58eTVx4kTtS6KgnTx5UvXs2VM5OTkpc3Nz5eTkpLp06WIURJT6dzB8YGCg0f6oqCj14osvKisrK2Vra6teeeWVQv3FGjJkSOrsx0abh4eHliavP5elS5dqx1m1apW2f8KECQpQOp1OhYWFZfscUq91elt4eLhSSikPDw/l4uJilC85OVnNmzdPVa5cWen1etWiRYtCDSTz589XjRo1UtbW1spgMKh69eqpt956y+jnI/XzetIXX3yhatasqczNzbU/ugrTmDFjVK1atZSlpaXS6/Wqfv366sMPP1TJyclamqL2maQGxeOX96iLdw5luh2/vKdEBEVZT1E
IIUqo1PUUf7+8G+syhkzT3om7h3slH1lPUQghxLMtWSWRrLJYTzGL958VEhSFEKKEk6D4LwmKQghRwiWrZJJV5kMusnr/WSFBUQghSrhk9ZAkZZZlmpJAgqIQQpRwKS3FrG6floyWogzezwF/f/9sr3AuhCjesvP7rtPpWLRoUY7Lzm2+/JL6TDGrrSSQlqIQQuRSZGQkLi4uhV2Np5ZMNjralJAZbSQoPiMePHhQZOauFKKkaNmyZWFXIY2EhATMzMxytBB1MklZBr2SEhTl9ulTOHbsGJ07d8ZgMGBjY0OfPn24ePGi9v6wYcNo166d9vrmzZuYmJgYzX169+5dzMzMjOY+PHnyJD179sTGxgaDwUC3bt04c+aM0bF1Oh0LFixgypQpVKxYkfLly+fjmQpRckVERNC4cWMMBgPPPfec0So3T94GVUoxZ84cKlasiJWVFX5+fuzevRudTkdERIRRucnJycyaNYsKFSrg4ODA0KFDuXfvnlGaqKgoXnrpJRwcHLC0tDRadixV1apVefPNN3n//fdxcXHB0tIyx2tKKpWUra0kkKCYS5cuXaJ9+/bcunWLNWvWsHz5cv7v//4PDw8P7ty5A0D79u05fPiwtizM3r170ev1HDlyREuzf/9+EhMTad++PQBnz56ldevW3L59m1WrVrFu3Tpu3LiBt7c3CQkJRnX48MMP+eOPP1ixYgVr1qwpwLMXomS4du0aY8aMYdKkSWzYsIH4+Hh69erFo0eP0k2/dOlSZs2ahb+/PyEhIdSoUYNXX3013bQff/wxf/75J19++SVvv/0269at45133tHej46Opm3bthw9epSlS5eyadMmDAYDXl5eXL9+3aisTZs2sX37dj788EO2bt2KwZD57DRPSkx+mK2tJJDbp7m0ZMkSHj16RFhYmLZ+W+PGjalbty6rVq1i9OjRtG/fnoSEBA4ePIiHhwd79+6lV69ehIWF8dNPP9GlSxf27t2Lq6srFSpUAGD27NnY29vz3Xffaatxt27dmurVq7NixQpGjhyp1cHe3p6QkJASv0CtEPnl9u3b/PDDD9SrVw8Ag8FAhw4dOHjwIG3btjVKm5SUxIIFCxg6dCgLFiwAUhYavnnzJitWrEhTtqOjI2vXrgWgS5cu/N///R8bN27U8gYFBRETE8OhQ4e0O0He3t64urqyaNEi3n//fa2sR48eERoamuNgmCpZqWyMUywZM4JKSzGXfvzxR7y8vIwWNK1Tpw4NGzZk3759AFSrVo3KlSuzd+9eIKWl6OnpSbt27fjhhx+0famtRICwsDB69OhBqVKlSExMJDExETs7Oxo3bmy0NiCkLGUkAVGI/OPk5KQFRIC6desCKbc1nxQVFcXVq1fp0aOH0f6ePXumW/aT64nWrVvXqNywsDA6dOiAvb299l1gamqKh4dHmu8CT0/PXAdESFk6KjmLraQsHSUtxVyKjo6mUaNGafZXqFDB6H5+agsxLi6OX3/9lfbt23Pv3j02btxIQkIChw4d4rXXXtPS37x5k6CgIIKCgtKU/eQ6camtSyFE/rC1tTV6nfo7mPpI5HFXr14FoFy5ckb7M3ren17Zjz8iuXnzJgcOHMDMLO2g+ho1ahi9ftrvguw8MywpzxQlKOaSvb19mvv6AH///Teurq7a6/bt2xMQEEBERAQODg7UqVOHe/fuMWXKFMLDw0lISDDqjGNvb0+3bt2MbpOmenLleGklClF0ODo6AnDjxg2j/el9T2SHvb09Xbp0MXrOmEqv1xu9ftrvApn79F8SFHOpbdu2fPbZZ0RHR2NnZwfA6dOn+e2333jllVe0dKktww8++EC7TdqoUSMsLS1ZsGABzs7OVK1aVUvv4+PD77//TuPGjTE1NS3QcxJC5F7lypWpWLEiW7duNbplumXLllyV5+Pjw5o1a3Bzc3uqW6PZIUHxXxIUc2n8+PGsXLmSTp06MWPGDOLj45k5cyZVqlTB399fS1enTh3Kly/
PDz/8wEcffQSAqakpbdq0ITQ0lEGDBhmVO3v2bJo3b07nzp0ZPnw4FSpU4Nq1a/zwww+0a9eOAQMGFORpCiGyydTUlGnTpjFu3DgqVKhAhw4dCA8PZ/fu3QA5GjcIEBAQwNq1a/Hw8GDs2LFUqVKFGzducPDgQZycnBg/fnye1T1RPSJRZf5HeKJKv8fts0Y62uSSs7MzP/zwA3Z2dgwaNIjhw4fTsGFDIiIi0tzmTG0hPt6hxsPDI80+gJo1a3Lo0CHKli3LyJEj6dy5M1OnTuXevXs0aNAgn89KCPE0Ro8eTWBgIP/973/p1asXJ06cYOHChQDY2NjkqKyyZcty4MABGjVqxJQpU+jUqRPjx4/n/PnztGjRIk/rLeMU/6VTqoT0sxWiEERERNChQwcOHz5Ms2bNAJg1axadOnWidevWhVw7URDeeustFi9ezK1bt4rcrFNxcXHY2Niw5fgcDNYWmaa9dyeeF+q9TWxsLGXKlCmgGhY8uX0qRD5q0qQJkZGRuLm5aftmz56NlZWVBMVn0MmTJ1mzZg2tW7fG3NyciIgIFi1axBtvvFHkAuLj5JnivyQoCpGPypQpUyTnxxT5o3Tp0kRGRvKf//yHO3fuUKlSJSZNmsSsWbMKu2qZkkWG/yXPFMUz7/jx43Tt2pWyZctSunRpateubTQbSGRkJF5eXtoctgMHDjTqRn/+/Hl0Oh3/+9//GDFiBLa2tpQvX54PPvgAgK+//pratWtTpkwZXnzxRWJiYrS8ERER6HQ6fv75Z+DfrvOTJk1Cp9MZzYkZHx9PQEAATk5OWFhY0KhRIzZv3mx0LqnLGWU2HyekzMG5aNEiXF1d0ev1VK9enSVLlhiliYqKom/fvlSoUAELCwuqVauWp503SiIXFxf27NnD7du3efToEefPn+fdd9+lVKmi3f5Q2Vg2qqQ8Uyzan5QQeaB79+5UqFCBFStWYGNjw19//aXNHBIZGYmnpyddu3Zl/fr13Lt3j5kzZ9KzZ08iIyONypkxYwa9e/cmODiYLVu2MGHCBG7cuEFERATvv/8+cXFxjB49msmTJ/PZZ5+lW5fIyEhatWrF6NGjGThwIPDvLCmDBg3i22+/Ze7cudSpU4fVq1fTu3dvtmzZYjRLSup8nFOnTsXGxoZp06bRq1cvzpw5ow30Hjt2LF988QUzZsygRYsW7N+/nylTpmBpacmIESMAGDx4MFeuXOGjjz6iQoUKXLx4UQveomR5lJTIo6TMe5c+SkosoNoUMiXEM+zGjRsKUNu2bUv3/fbt26vWrVur5ORkbd/x48eVTqdTO3bsUEopde7cOQWovn37amkSExNVhQoVlMFgUDdv3tT2T5gwQdna2mqvw8PDFaAOHz6s7QPUwoULjerx66+/KkAtX77caH+rVq1UkyZNtNdDhgxROp1O/f7772mO8eOPPyqllPrrr7+UTqdTn376qVFZU6ZMURUrVlRJSUlKKaUMBoP66KOP0r0uomSIjY1VgFrzfxNUyB/TM93W/N8EBajY2NjCrna+ktun4plWtmxZXFxcmDZtGl9++aXR3JL379/np59+ws/Pj6SkJG1+SVdXV5ydndPML/n4XJWmpqZUr16dRo0aUbZsWW2/q6srMTEx3L17N0f1/PHHHwHw8/Mz2t+vXz+OHDlitKRQVvNxpo6L6927t3ZOiYmJ+Pj4cO3aNS5dugSkdAJatGgR//nPf/jrr79yVF/xbMnq1ml2OuI8KyQoimeaTqcjLCwMNzc3Ro0ahbOzM82aNWPv3r1ER0eTlJTE+PHjMTMzM9ouXryoBY9U6c1VmZO5MTMTHR2NmZmZ0QTzkDKnpVLK6DllVse8efMmSikcHByMzik1qKee1/r16/H29mbGjBnUqlWLOnXqEBISkqN6i2dDkkrK1lYSSFAUzzxXV1eCg4OJjo4mIiICvV5P9+7dsbKyQqfTMWPGDA4fPpxmmzlzZoHV0d7enkePHhEdHW20/++//0a
n06UJhFmVpdPp+Omnn9I9r4YNGwIpc3X+97//5ebNmxw6dIjatWvTr18/zp49m5enJoqB5OTkbG0F4ZtvvqFhw4ZYWFjg6urKypUrs8yT2hnuyS03Pb+lo40oMczMzPDw8GDq1Kn06NGDv//+m1atWnHy5EnefffdAq3Hky3J1LX5goODGT58uLY/ODhY62WaXd7e3gDcunWL7t27Z5nexMSE5s2b8+6777Jt2zb++usvqlevnu3jieKvqIxT3LdvH7169eLVV18lKCiIPXv2MGzYMKytrenTp0+W+efNm0eHDh2010/OLpYdEhTFM+23335jwoQJ9OvXjxo1ahAbG8v8+fOpWrUqNWrUYOHChXh5edGvXz/69++PnZ0dUVFRfPfddwwdOhRPT888r5Obmxtbt26lXbt2GAwGateuTYMGDXjxxRcJCAjgwYMH1K5dmzVr1rB//362bt2ao/JdXV0ZNWoUL7/8MpMmTaJFixY8evSIP/74g/DwcLZs2UJsbCydO3fm5Zdfpnbt2jx8+JClS5dia2tLkyZN8vycRdGmkpNJTs5i6agCaCm+8847tGjRguXLlwPQoUMHzpw5w9tvv52toFirVq2nHhcsQVE80ypWrEjFihWZP38+ly9fxsbGhnbt2rFmzRpMTU1p3bo1+/btIzAwkKFDh/Lw4UMqV66Mt7c3NWvWzJc6ffLJJ4wdOxZfX18ePHhAeHg4np6erFmzhunTp7NgwQJu375NnTp12LhxY7Zae0/66KOPqF27Np9++ilz5szBysqK2rVrax15LCwsqF+/PkuXLuXixYtYWlrSrFkzwsLCcHBwyOtTFkXco6RHlErKfPmprIZsPK2EhATCw8ONxhAD9O/fn6+++orz588brSiUX2TuUyGEKKFS5z5d+uNgLK3MM0374O5DRrdbzaVLl4zmPtXr9WnWd8yNEydOUK9ePUJDQ+nSpYu2/88//8TV1TXN/sedP3+eatWq4eDgwO3btylbtiw9e/bkvffeS9N5LSvS0UYIIUq45OSkbG2QskKQjY2Nts2fPz9P6pDayezJTmWp69Xevn07w7x6vZ433niDL774gj179jBx4kQ2bNiAt7c3jx7lrIUrt0+FEKKEy8ncp+m1FDMSGxvL1atXszz+03bscnR0ZNmyZdprDw8P6tWrx/PPP8/mzZvp27dvtsuSoCiEECVcdsYhpr5fpkyZbC8dFRwczGuvvZZlupMnT2otwtjYWKP3UluQOb0N2rVrVwwGA7/88kuOgqLcPhVCiBIu+Z/ep5lvOe99+uqrr6KUynKrU6cONWrUwMzMjFOnThmVkfq6Tp06eXKuWZGgKIQQJVxS8iMSkzLfkpLzt/epXq+nQ4cObNy40Wj/+vXrcXNzy3HP0+3bt3Pv3j2aN2+eo3xy+1QIIUq4lNunmbeRCmKat7feegtPT09GjhxJ3759CQ8PZ926daxfv94oXalSpRgyZAgrVqwAYMKECZiYmNCyZUtsbW05dOgQ8+fPp1mzZrzwwgs5qoMERSGEKOGyM41bQUzz1rZtW0JCQpg5cyYrVqygSpUqfPHFF2kmyk9KSiIp6d8gXbduXZYtW8Znn33G/fv3qVSpEsOGDWP27Nk5XstSxikKIUQJlTpOcVZoZywMZpmmjb/3iFm+u4iNjc12R5viSFqKQghRwqV0pMn89mlW08A9KyQoCiFECZekkrMxJKNgVskobBIUhRCihEtKekRiUuZP0pKSEguoNoVLgqIQQpRwycmK5OTMg2JW7z8rJCgKIUQJl5Np3p51EhSFEKKEk5bivyQoCiFECZesshEUS8joPQmKQghRwiUrlWXQk6AohBCiREhMTMI0UZdlmpJAgqIQQpRw0lL8lwRFIYQo4VRy1h1pSkjnUwmKQghR0qUMycj89qkMyRBCCFEiyJCMf0lQFEKIEk6C4r8kKAohRAmnlCKrVQRLyiqDma8VIoTIF/7+/ri7uxd2NYQAIDEpicT
ELLYkGZIhhMgnb731Fvfu3SvsaggByJCMx0lQFKIQ1KhRo7CrIIRGnin+S26fCpEDqbc9IyIiaNy4MQaDgeeee45ffvlFSxMfH09AQABOTk5YWFjQqFEjNm/enG45qWJiYnjttdeoVKkSFhYWODs7079/f6M8UVFRvPTSSzg4OGBpaUn79u2NjitEbiUnJ2drKwkkKAqRQ9euXWPMmDFMmjSJDRs2EB8fT69evXj06BEAgwYN4tNPP2Xy5Mls2bKFunXr0rt3b7Zt25ZhmQEBAWzfvp158+axa9cuFi5ciF6v196Pjo6mbdu2HD16lKVLl7Jp0yYMBgNeXl5cv349389ZPNvUP7dPM9tKSkcbuX0qRA7dvn2bH374gXr16gFgMBjo0KEDBw8epEyZMoSEhLB8+XJef/11ALp06cL58+eZPXs2PXr0SLfMQ4cOMXDgQIYMGaLte7ylGBQURExMDIcOHaJ8+fIAeHt74+rqyqJFi3j//ffz63RFCSC3T/8lQVGIHHJyctICIkDdunWBlNubt27dAsDPz88oT79+/Rg/fjz37t3DYDCkKbNJkyasWrUKR0dHunTpkqZnalhYGB06dMDe3p7ExEQATE1N8fDw4PDhw3l6fqLkSUxKQpdF51LpfSqESJetra3Ra3NzcyDlWWJ0dDRmZmbY29sbpalQoQJKKWJiYtINikuXLsXe3p7FixczadIknJ2dmTZtGm+88QYAN2/e5MCBA5iZmaXJK512xNNKzsbcpyXkkaIERSHykr29PY8ePSI6Oho7Oztt/99//41Op0sTUFPZ2NgQFBREUFAQx44d48MPP2TkyJG4u7vTrl077O3t6dKlC++8806avI8/exQiN+T26b+ko40Qeaht27YABAcHG+0PDg7WeqtmpX79+ixZsgSAkydPAuDj48OJEydwc3OjWbNmRlv9+vXz+CxESZNVJ5vsjGN8VkhLUYg81KBBA1588UUCAgJ48OABtWvXZs2aNezfv5+tW7dmmK9Nmzb06tULd3d3TE1NWb16Nebm5rRr1w5I6Z26du1aPDw8GDt2LFWqVOHGjRscPHgQJycnxo8fX1CnKJ5BySobLUUJikKI3FizZg3Tp09nwYIF3L59mzp16rBx40a6d++eYZ42bdqwevVqzp07h4mJCfXr1+ebb77Bzc0NgLJly3LgwAFmzpzJlClTuHXrFuXLl6dly5b06tWroE5NPKNUctbrJZaQlaPQqZIy+EQIIYSRuLg4bGxs8Hm3CqUsMn+alhifzO6ZF4mNjaVMmTIFVMOCJy1FIYQo4VJ6n2adpiSQoCiEECWcSlaoLJ4pZvX+s0KCohBClHDSUvyXBEUhhCjhpKPNvyQoCiFECSfrKf5LgmI+Sk5O5sqVK1hbW6PT6Qq7OkKIIk4pxZ07d3BycsLEpODmVklKAl1i1mlKAgmK+ejKlSs4OzsXdjWEEMXMpUuXqFy5coEdTzra/EuCYj6ytrYG4K8//9T+nVfy88dT2rT/0j3Mnwcp6mxcvpQLcPE/4flS7pFvfsqXcgFuP8i/65FfdLq8b8nFq0cEqh15/n2RFelo86+nCor+/v78/PPP/P7773lVn2dK6i1Ta2vrPB/sKkGxYORbULTKv0/QytwyX8q11JnnS7kAFrq0q38UdSb5EBQBUBT44xalUras0pQETxUU33rrLe7du5dXdRFCCFEIZJWMfz1VUCyoddwePHiApWX+/PUrhBAlXjaGZFBCbp8+Vfvf399fWyF81apV6HQ6jhw5gq+vLwaDgVq1arF69eo0+Xbs2EGbNm0oXbo0dnZ2eHp6cuTIEQAiIiLQ6XTs2LGDPn36UKZMGW0V85iYGEaOHImjoyN6vZ6mTZsSFhaWpuyOHTtSvnx5ypQpQ4sWLfj222+N0sTExPDaa69RqVIlLCwscHZ2pn///kZpoqKieOmll3BwcMDS0pL27dvzyy+/PM3lEkKIIin1mWJWW0mQ5zfFBw0aRKd
OndiyZQuNGzfG399fWxMOYP369XTv3p3y5cuzbt061q5dS5s2bbh8+bJROcOHD6dGjRps3ryZiRMn8vDhQzp27Mj27duZO3cu27Zto27dunTr1o1jx45p+c6dO0f37t353//+x6ZNm2jTpg1du3YlIiJCSxMQEMD27duZN28eu3btYuHChUYLtUZHR9O2bVuOHj3K0qVL2bRpEwaDAS8vL65fv57huSckJBAXF2e0CSFEUZeUpLK1lQR53vv0zTffZOTIkQC0bt2aHTt2sGnTJmbOnIlSiokTJ9KpUyc2b96s5enatWuacnr06MF7772nvV65ciVHjx7l119/pW7dugB07tyZP//8k3feeYcNGzZox0+VnJxMhw4dOH78OJ999hmenp4AHDp0iIEDBzJkyBAt7eMtxaCgIGJiYjh06BDly5cHwNvbG1dXVxYtWsT777+f7rnPnz+f2bNn5+h6CSFEYZMZbf6V5y3FTp06af82GAy4uLgQFRUFwOnTp4mKiuKVV17Jspxu3boZvQ4LC6N+/fq4urqSmJiobR07duTw4cNauqioKIYMGUKlSpUoVaoUZmZmhIWF8ccff2hpmjRpwqpVq1i0aFG6PWfDwsLo0KED9vb22nFMTU3x8PAwOtaTpk2bRmxsrLZdunQpy/MUQojClqyycfu0ABqK3333HQMHDqRGjRrodDqjRk5WYmNjGTZsGPb29lhbW9OnTx+uXr2a4zrkeUvR1tbW6LW5uTnx8fEA3Lp1CwAnJ6csy6lQoYLR65s3b3LkyBHMzNJ23TY1NQVSWoY9evQgNjaWOXPmULNmTQwGA2+//TYXL17U0i9duhR7e3sWL17MpEmTcHZ2Ztq0abzxxhvasQ4cOJDusTLrXKTX641uwwohRHFQVAbvf/vtt/z66694eHhw+/btHOXt168fx48fZ/ny5VhYWDBjxgx8fX35+eefKVUq+6GuQAfvly1bFkiZ6SUrT47Tsbe3p0GDBqxYsSLDPH/99RdHjhxhy5Yt9OzZU9v/4MEDo3Q2NjYEBQURFBTEsWPH+PDDDxk5ciTu7u60a9cOe3t7unTpwjvvvJPmGBL0hBDPmuRk0BWBwfsLFy5k8eLFAOzZsyfb+SIjI9m1axe7du3S7lbWrl0bNzc3QkJC6Nu3b7bLKtCgWLt2bSpXrszKlStzVEkAHx8fdu7ciZOTU4YtzdTgZ27+7yDjCxcu8NNPP+Hq6ppunvr167NkyRJWrFjByZMnadeuHT4+PqxZswY3NzcMBkOO6imEEMVNURm8n9v5XkNDQ7G1taVjx47avtq1a9OoUSN27txZdIOiTqdj0aJFDBgwgN69ezN48GD0ej2RkZE0b96c559/PsO8gwcP5tNPP8XT05OJEyfi6upKTEwMR44c4eHDh8yfP586depQuXJlpk6dSlJSEnfv3iUwMJBKlSoZldWmTRt69eqFu7s7pqamrF69GnNzc9q1awek9E5du3YtHh4ejB07lipVqnDjxg0OHjyIk5MT48ePz9frJIQQBenRg2SSErMYvP8o5f0ne9UXhcdGp06donbt2mnuMLq5uXHq1KkclVXgc5/269eP0qVLM3fuXPr374+FhQVNmjShV69emebT6/Xs2bOHWbNmMXfuXK5evYqDgwONGzfWervq9XpCQkIYNWoUfn5+ODs7M3PmTPbs2cPPP/+sldWmTRtWr17NuXPnMDExoX79+nzzzTe4ubkBKbd5Dxw4wMyZM5kyZQq3bt2ifPnytGzZMst6CiFEcWFubk7FihU5+9W1bKW3srJKs8hBYGAgs2bNyofaZV90dHSa/iwAdnZ2OX42qVOqpMxoV/Di4uKwsbHh72vXitXcp/mlWM6pWgwvtC4xfx7+JJ3I2ZdLTnzXNf1hTk8r6s6NfCkXIDE5i7WWciFePWKq2kZsbGyef2dkeMz4eB4+fJittEqpNK2xzFqKsbGx2eoBWr16daPHXgBVq1bl+eef5+OPP84
yf8eOHTE1NU0zUcubb76ZZvRBVmSVDCGEKMEsLCywsLDIl7KDg4N57bXXskx38uRJ6tSpk+vj2NnZpTsELjo6Gnt7+xyVVXCrWOaxx6eYy28xMTHMmjWLEydOFMjxhBDiWfDqq6+ilMpye5qACFCnTh1Onz7Nkzc+T506leOyi21QLEgxMTHMnj1bgqIQQhRBvr6+REdH8/3332v7/vjjD44cOZLujGmZkdunQgghioQLFy5os4bdv3+fM2fOsHHjRgD69OmjpStVqhRDhgzRxq23atWKzp0788orr7B48WJt8H6DBg148cUXc1SHYt9SjIiIoHHjxhgMBp577jmjlSyUUixatAhXV1f0ej3Vq1dnyZIlRvlPnTpF//79cXZ2pnTp0tStW5fFixeT/M9I1fPnz1OtWjUA/Pz80Ol06HQ6zp8/X2DnKIQQJUF4eDh+fn74+flx48YNvv32W+3145KSkkhKSjLat379ejp27Mjw4cMZOHAgtWrVYufOnTmazQaKeUvx2rVrjBkzhqlTp2JjY8O0adPo1asXZ86cwczMjLFjx/LFF18wY8YMWrRowf79+5kyZQqWlpaMGDECgMuXL1O7dm0GDRqEtbU1R48eJTAwUBvj6OjoSEhICC+++CLz5s2jQ4cOADg6OqapT0JCAgkJCdprWSVDCCGyz9/fH39//yzTpTdowsbGhhUrVmQ661l2FOugePv2bX744Qfq1asHpExA3qFDBw4ePIijoyMff/wxy5cvZ/jw4UDKrDj3799n9uzZDB8+HBMTE7y9vfH29gZSLnTbtm25f/8+H3/8MYGBgej1eho3bgxArVq1aNmyZYb1kVUyhBCieCvWt0+dnJy0gAhoS0pFRUWxe/duAHr37m20qoaPjw/Xrl3Tuu/Gx8cTGBhIzZo10ev1mJmZMWPGDK5evcrdu3dzVB9ZJUMIIYq3Yt1STG9FDkgJdDdv3kQphYODQ7p5L126hIuLC1OmTOHzzz8nMDCQpk2bYmtry9atW3n33XeJj4/Hysoq2/UpCtMdCSGEyL1iHRQzY29vj06nY9++fWlmSoCUyWIhZXDp66+/zpQpU7T3duzYUWD1FEIIUXQ8s0Ex9TnhrVu36N69e4bpHjx4YBQ0k5KS+Prrr43SPN4CFUII8ex6ZoOiq6sro0aN4uWXX2bSpEm0aNGCR48e8ccffxAeHs6WLVuAlDnzPv/8c+rWrYuDgwPLli0z6kEKULFiRWxtbfnqq6+oVq0aer2eBg0apNsCFUIIUXwV6442Wfnoo4949913+frrr+nWrRsvvfQS69evx8PDQ0uzdOlSPDw8GD16NMOGDaN+/fpMnz7dqBwTExNWrlzJuXPn8Pb2pnnz5tlaKFkIIUTxIqtk5KP8XCWjWMrHlbuTj93Ml3L/3vZL1olyITribL6UC3Dit+P5Uu6tB7H5Ui5AsnwNASmrZEznmwJdJUMYe6ZbikIIIURO5Cgo5vfKFI0aNcrWbAZCCCFEfpCWohBCCPGPYhUUlVJpeoYKIYQQeSVXQTE0NBR3d3csLCxo2rQpBw4cMHp/1apVNGjQAAsLCypVqsSMGTPSzGi+f/9+mjZtioWFBe7u7oSGhqY5Turt2p07d9KwYUP0ej3ffPMNACEhITRq1AgLCwucnJwICAhIM47wwoUL9OnTBxsbGwwGA507d+bYsWNGaapWrcqbb75JUFAQzs7OWFtb4+/vT0JCAkePHqVNmzbaChxP5hVCCPFsyfE4xatXrzJy5EhmzZqFnZ0dCxYsoHPnzvz555+UL1+eDz74gMmTJzN+/HgWL17MyZMntaC4YMECIGV1i86dO1O/fn02bNhAdHQ0b7zxBvfu3aNRo0ZGx7ty5Qpjxoxh5syZVKlShSpVqrBt2zb69OlD//79WbBgAadOnWL69OlcvHhRW3vrzp07eHp6YmJiwvLly7GwsGDu3Lm0b9+e3377DWdnZ+0YW7duxd3dnU8//ZSzZ88SEBC
Aubk5kZGRBAQEUKFCBaZMmYKfnx8nTpzAxCT9vyVklQwhhCjechwUb9++TXBwMF5eXgB4eHjg7OzMkiVLmD59OoGBgUyePJl58+YBKYPjzc3NCQgIYNKkSZQtW5agoCB0Oh2hoaHY2NgA4OzsrM1C87jo6GhCQ0Np0aKFtq9v3760bNmSdevWAdClSxdKly7N66+/zrFjx6hfvz4rV67kwoULHD9+HDc3N62uVapUISgoiMWLFxsdZ+vWrdpg/IiICD7//HNCQ0Pp0qULAMnJyXTv3p1jx47RsGHDdK+NrJIhhBDFW45vn9rY2GgBMfW1j48PBw8eZP/+/dy9exc/P780K1M8ePCA33//HYCDBw/SoUMHLSACeHl5YW9vn+Z4ZcuWNQqId+/e5ejRo0arMAP069cPgH379gHw448/4u7urgVESJkPtWPHjlqaVB4eHkaz07i6umJiYmJ0nq6urgCZrnwhq2QIIUTxluOWYrly5dLsq1ChAidPnuTmzZQB1E2aNEk3b2qQuHr1KjVr1kzzfvny5dMt+3ExMTEopdLst7GxQa/Xc/v2bSClhflkmtTyUoNzqvRW27C0tDQKlNmZ/1RWyRBCiOItx0Hxxo0bafb9/fffODo6ai29kJAQo2d2qapVqwakrFp//fr1NO+nt0+n0xm9trW1RafTpUkbGxtLQkKCVgd7e3tOnz6dbl3Ta5EKIYQQOb59Ghsby549e4xe7969mxYtWtCqVStKly5NVFQUzZo1S7OVLVsWgOeee47w8HBiY/+dNmrPnj1aKy8zVlZWNGrUSOtQk2rDhg0AtG3bVvv/sWPHjAJjdHQ0u3fv1tIIIYQQj8txULS3t2fYsGGsXr2abdu24evri1KKcePGYWtry5w5c5g8eTJTpkwhNDSUsLAwli9fjq+vL/fv3wdg3LhxJCcn4+vry7Zt2/jyyy955ZVXtKCZlVmzZhEZGclLL73Et99+y4cffsi4cePo3bs39evXB2Do0KG4uLjQrVs3vv76a7Zs2UKnTp0oVaoU48aNy+lpCyGEKAFyfPvU0dGR9957j0mTJnHmzBnq1avHrl27tOd3EyZMoFKlSnzwwQcsXboUMzMzatSowfPPP689l3N0dCQ0NJQxY8bg5+dHjRo1+OSTT5gxY0a26tCjRw+Cg4OZM2cOPXv2xN7enuHDhzN//nwtjbW1NREREQQEBDB8+HCSkpJo06YNe/fuTffWbn5InWv9zp07BXK8Ii8/JwS/lz/X+E7C/Xwp925i/q3N+UA9zJdy49WjfCkXZELwVPGkXGNZp6HwyCoZ+SgqKqrAArAQ4tlx6dIlKleuXNjVKJEkKOaj5ORkrly5grW1dZoOQ+mJi4vD2dmZS5cu5emyMcWt3Pwsu7iVm59lS53zv9yclq2U4s6dOzg5OWU4SYjIXzm+fSqyz8TEJFd/7ZUpUyZf1lIrbuXmZ9nFrdz8LFvqnP/l5qTsx8dvi4Inf4oIIYQQ/5CgKIQQQvxDgmIRotfrCQwMzPNZcYpbuflZdnErNz/Lljrnf7n5XbbIe9LRRgghhPiHtBSFEEKIf0hQFEIIIf4hQVEIIYT4hwRFkW/++usvRowYQaNGjShVqhTu7u7ppluxYgWurq5YWFjQsGFDtm/fnq3yr1y5Qu/evbG2tsbe3p5XX32VuLi4vDyFAnP37l0qV66MTqfj559/zjStUooFCxZQpUoVLC0tadWqFQcOHCigmuaf7P68POlZvR6icEhQFPnm+PHj7Nixg5o1a1K3bt1003z99de89tpr9OvXj9DQUFq1akWvXr2y/FJ79OgRnTt35o8//mDdunX85z//YdeuXQwcODA/TiXfvfPOOyQmJmYr7XvvvUdgYCDjx49n+/btODo60qlTJ86ePZvPtcxf2fl5Sc+zej1EIVFC5JOkpCTt30OGDFH16tVLk8bV1VUNGDDAaF+rVq2Ur69vpmWvW7dO6XQ6derUKW3frl27FKAOHjz
4lDUvWCdPnlQGg0EtX75cAerw4cMZpn3w4IEqU6aMmjZtmrYvISFBubi4qDfeeKMgqptvsvPz8qRn+XqIwiEtxUIUGhqKTqdDp9MZrRDSq1cvdDodBoOBP/74oxBr+HSymrvx7Nmz/PHHH/Tt29dof//+/fn+++9JSEjIMG9oaCgNGjSgdu3a2r6OHTtib2/Pzp07n67iBWz06NGMGDHC6Fwysn//fuLi4oyumbm5OS+++GKxO+8n5Wauz2f5eojCIUGxEPn6+jJ8+HAAFi5cyLFjx9iwYQNbtmwBUm4Lubq6FmIN89epU6cAqFOnjtF+Nzc3Hj58yLlz5zLN+2Q+nU5HnTp1tHKLg40bN3Ls2DHefvvtbKXP7JpdvHiRBw8e5HkdizK5HiKvSVAsZIsXL6Z69eo8evSIoUOHMnr0aAB8fHwYNWpUIdcuf0VHRwNga2trtN/Ozg6A27dvZ5r3yXypeTPLV5Tcv3+fgIAA5s2bl+1JqKOjo9Hr9VhYWBjtt7OzQymlXdOSQq6HyGsSFAuZlZUVq1evxsTEhF9++YXr169jY2PDypUrs7XclCi+3n33XSpUqMDQoUMLuypCiH9IUCwC2rRpQ7NmzbTXPXr0KBELjKa2CGNjY432p/51b29vn2neJ/Ol5s0sX1Fx4cIFFi9ezOzZs4mNjSUmJoa7d+8CKcMzUv/9JDs7OxISEoiPjzfaHx0djU6n065pSSHXQ+Q1CYpFwJdffsmhQ4e012vXrmX//v2FWKOCkfoc6MlngKdOncLc3Jzq1atnmvfJfEopTp8+neb5UlF07tw5Hj58SLdu3bCzs8POzo7u3bsD0KFDB3x8fNLNl3pup0+fNtp/6tQpbZxeSSLXQ+Q1CYqF7NKlS4wdOxZIaSG6ubmRnJzMkCFDuH//fiHXLn9Vr14dV1dXgoODjfavX78eb29vzM3NM8zr6+vLr7/+yp9//qnt+/7777l16xZdu3bNtzrnlUaNGhEeHm60LVmyBIDly5ezbNmydPO1bt2aMmXKGF2zR48eERISUizOO6/J9RB5rpCHhJRoycnJytvbWwHKzs5OXb16VUVGRioTExMFqJEjRxZ2FZ/KvXv3VHBwsAoODlaenp7K2dlZe339+nWl1L/jDd9++20VHh6uRowYoUqVKqX279+vlXP+/HllamqqZs+ere17+PChcnd3V/Xr11fffPONWr9+vXJ2dlbdunUr8PPMK+Hh4WnGKXp5eakaNWoYpZs/f77S6/UqKChIff/996p3797K2tpanTlzpqCrnKey8/NSkq6HKBwSFAvR0qVLFaAAtWrVKm3/hAkTFKB0Op0KCwsrxBo+nXPnzmnn9+QWHh6upfviiy9UzZo1lbm5uRbk0isnMDDQaH9UVJR68cUXlZWVlbK1tVWvvPKKio2NLYAzyx/pBUUPDw/l4uJilC45OVnNmzdPVa5cWen1etWiRQujPyKKq+z8vJSk6yEKh6ynKIQQQvxDnikKIYQQ/5CgKIQQQvxDgqIQQgjxDwmKQgghxD8kKAohhBD/kKAohBB5yN/fH3d398KuhsglGZIhhBB56MyZM9y7d48GDRoUdlVELkhQFEKIYighIQEzM7NcLc4sMiZXUwgh+Pe2Z0REBI0bN8ZgMPDcc8/xyy+/aGni4+MJCAjAyckJCwsLGjVqxObNm9MtJ1VMTAyvvfYalSpVwsLCAmdnZ/r372+UJyoqipdeegkHBwcsLS1p37690XEBqlatyptvvsn777+Pi4sLlpaWxWbt0OKkVGFX4P/bu/uomLM/DuDvbxlFk9FMTcTWyGYm5KQ1NqUtT4mRisXpoHDW7mIfaLdVrUU2Nlsd0WKd46Fi7dlCJQ+d7PFURK2TpVhCrNpMtYY2amum+/tD8z2+zUNp2fan+zpn/pj7vY9fztzu/d75DEVR1H/FgwcP8MknnyAiIgICgQCRkZEICgrC7du3wePxMHfuXOTk5GD9+vWQyWRITU3FzJk
zkZmZienTp+utMywsDMePH0dsbCwkEgmqqqpw/Phx9rpKpcLYsWPB5/ORlJQEgUCApKQkjB8/HmVlZRCLxWzegwcPwsnJCZs3b4apqSksLCxe+T3pdroyxhxFUdR/RWhoKGEYhpSUlLBp2ni0eXl55NdffyUAyPfff88pN2bMGOLm5sapZ9iwYez7YcOGkbCwMIPtrl69mggEAqJUKtm0xsZGYm9vT8LDw9k0BwcHIhKJSH19/T8aJ2Uc3T6lKIpqZWdnh2HDhrHvhw4dCuDZ9mZeXh4AYNasWZwyc+bMQXFxMZ48eaK3Tjc3NyQnJyM+Ph4lJSU613NzczFu3DgIhUKo1Wqo1WqYmprC29sbRUVFnLw+Pj50dfiK0UmRoiiqVd++fTnvtb/p2djYCJVKBR6PB6FQyMlja2sLQggePXqkt86kpCTMnz8fCQkJcHFxgb29PbZv385er62tRWZmJng8Hue1d+9e3L9/X6ct6tWizxQpiqI6QCgUorm5GSqVClZWVmy6UqkEwzA6E6qWQCBAYmIiEhMTcfXqVWzevBlLly7F8OHD4eXlBaFQCD8/P3z99dc6Zc3MzDjvGYZ5qWOidNGVIkVRVAeMHTsWAJCens5JT09PZ0+rtsfFxQWbNm0CAFy/fh0AMHHiRFy7dg3Ozs4YNWoU5+Xi4vKSR0G1h64UKYqiOmDEiBGYMWMGwsLC0NDQAKlUin379uH8+fPIysoyWM7T0xNBQUEYPnw4TE1NkZqaip49e8LLywvAs9OpP/zwA7y9vfHpp5/C3t4eNTU1uHjxIuzs7LBixYp/a4gU6KRIURTVYfv27UNUVBRiY2Px8OFDyGQyHDhwAP7+/gbLeHp6IjU1FeXl5TAxMYGLiwuys7Ph7OwMABCJRLhw4QJWrVqFlStX4s8//4RYLIa7uzuCgoL+raFRWl19/JX672t7xPy/3IdNmzaRo0eP/gs96hxvb2+iUCi6uhvtKi4uJmvWrCFPnjzhpO/Zs4cAIDU1NS+lHaVSSfh8Prl69arRfJs2bSLPf1yVl5cTAOyLYRhiZ2dHgoODyd27dzllJ06cSGJiYl5Kf6nXH32mSLXrq6++wv79+7u6Gx2SmJiIY8eOdXU3DNq2bRsSEhK6uhvtunz5MqKjo/H06VNOukKhQEFBgcFDJS9q/fr18PHx6XQA7Q0bNqCgoAD5+fmIjY1FQUEBpk6dCo1Gw+aJiopCfHw8VCrVS+kz9Xqj26dUuwYPHtzVXXhtaL/31hUaGhrQq1evf1SHjY0NbGxsXkp/6uvrsWvXLuzdu7fTdTg5OcHd3R0A4OHhgT59+iAwMBA3btxg7/W4ceNgZWWFlJQULF++/GV0nXqN0ZUihdLSUkydOhUikQi9e/eGVCrFt99+y17X91M4+fn5GDlyJMzNzTFixAicOHECrq6uWLBggU45Y7EkASAhIQFyuRwCgQBisRjTpk3DzZs3X3gcEokE9+7dw9atW8EwDBiGQXJyMgCgpaUFMTExkEgkMDMzg0wmw44dOzpUr4+PD6ZNm4YDBw5AKpWCz+dj/PjxuH37Niffw4cPsWjRIjZ+pYeHB86ePau3Lq2KigrMnj0btra2MDc3x6BBg3QOVly/fh0BAQEQCASwsLCAQqHQabutu3fvsuNfvHgxRCIRRo8eDQA4evQoJk2aBLFYjD59+uDtt99GTk4OWzY5ORkLFy4E8GwSZBgGEomEvcYwDGpra19o3PocOHAAADBlyhROel1dHUJCQmBpaQkbGxt88cUXUKvV7dYHAJaWlgCA5uZmTvqsWbOQkpLSoTqo7o1OihT8/f2hUqmwa9cuHD16FJ9//rnB6BwAUFVVBT8/P1haWiItLQ3h4eFYsmQJKisrdfJqY0mGh4cjLS0NjY2NCAoK4nxoVVRU4KOPPkJWVhZ27tyJlpYWeHh4vHCw44yMDPTr1w/vvvsuCgoKUFBQAIVCAQAIDw/H2rVrsWDBAmRnZ8PX1xcffvg
hvvvuuw7VffnyZcTFxSE2NhbJycm4desW5s2bx17XaDSYMmUKsrOzsXHjRqSnp4PP52PSpEk6fwQ8LyQkBFeuXMGWLVuQk5OD6OhoztbfnTt32HuRnJyM/fv3o6amBhMmTMDff//dbr8jIyNBCMGPP/6IuLg4AEB5eTn8/f2xd+9eHDx4EJ6enpg6dSpOnz4N4NkW6apVqwAAOTk5KCgo0Al6/U/HDQA///wz3NzcYG5uzklftGgRMjIyEBsbi5SUFFy7dg2JiYl662hpaYFarUZTUxOuX7+OtWvXQiaT6fwR5+HhgcuXL6Ompqa9W0Z1d139UJPqWjU1NQQAOXz4sME8bQ+5hIeHE4FAQOrq6ti0vLw8AoCEhoZyyhmLJamPWq0mT58+JXw+n+zYscNgHwxxcHAgy5Yt0xkjj8cjERERnPTg4GBiY2ND1Gq10Tq9vb2JhYUFqa6uZtO0B07u379PCCEkKyuLACA5OTlsnqamJmJvb09mzJjBqev5gzYWFhZky5YtBtsOCQkhjo6OpKGhgU2rrq4mfD6fbN261WA57UEUPz8/o2PTaDSkubmZ+Pr6kuDgYJ3xtT1Q0za9o+PWZ8iQITr/VqWlpYRhGLJr1y42Ta1Wk0GDBhk9aKN92dvbk9LSUoP348iRI0b7RFF0pdjNiUQiODg4IDIyEikpKaioqGi3TFFREcaNG8duVQHPvtjcNvwVYDyWpNaFCxcwadIkiEQi9OjRA71790Z9fb3RLVRtjEjty5iLFy+iublZb8zKmpoath2NRmOwTldXV86ztLbjyMvLQ58+fTB58mQ2D4/Hw4wZM5Cfn2+wb25uboiPj8f27dtx69Ytneu5ubmYPn06evTowfbLysoKI0eO1ImLqY92pfy8iooKhIaGYsCAAejRowd4PB5yc3M7tWXd2XEDz3Yc2j6fLCoqAiGE81UEU1NTBAYG6q1j48aNKCoqQmFhITIyMmBnZwc/Pz+dXQtra2u2TYoyhk6K3RzDMMjNzYWzszOWLVuGN954A6NGjTL6TEjfhxkAzk/caBmLJQkAv//+O3x9faHRaLBjxw6cO3cORUVFEIvFbB592saJNEZ76rBt3Ejte+027eDBgzl13r17t8PjUKlUesdva2trdBv4p59+woQJE/Dll1/CyckJMpkMhw4dYq/X1tYiMTFRZ7x5eXk6cTH1aTvmlpYWTJ8+Hfn5+Vi3bh1OnTqFoqIiTJkyxej9NqSz4wae3bu2YcyqqqrA4/E4YdT0jUPL0dERo0aNglwuR2BgIA4fPozKyko2aoyWtp2GhoZ2x0R1b/T0KYUhQ4YgPT0dzc3NOH/+PKKiouDv74/Kykrw+Xyd/P3799f7bKa6uvqF287JyUF9fT0OHTrETjxqtbrdD9SOrJK0tCvY6upqDBgwgE1XKpWc69nZ2ZzndHZ2di/Uhr7xK5VKvStorf79+2P37t3YuXMnLl26hJiYGMyZMwc3btyAo6MjhEIhFAoFli5dqlP2+ZW6IW1jZd66dQvFxcXIzMxEQEAAm97ZyaKz49aWbRtEu3///gbji3aEjY0NrK2tUVpayknXtiMSiTpUD9V90ZUixeLxePD29kZERATq6urwxx9/6M0nl8tx8uRJ/PXXX2xaXl5ep34FvKGhAQzDcFZ7aWlp7W6Jto0RqdWzZ0+dFc/o0aPB4/F0YlampaVBLBZjyJAhAJ7FpXy+Tu1qsCPGjh2Luro65ObmsmlqtRoZGRlszExjTExMIJfLERMTA7VazW6lTpw4ESUlJRg5cqTOmKVSaYf7p6Wd/J4f271793Du3DlOvrYrYUP+ybilUinKy8s5aXK5HAA4B3s0Gg0yMzON1qWlVCpRW1vLbpdqaVf9nblnVPdCV4rd3JUrV/DZZ59hzpw5GDx4MB4/foxvvvkGEonE4PcTV6xYgW3btkGhUCA8PByPHj1CdHQ0rK2tYWLyYn9njR8/HgCwcOFCfPDBBygtLUVCQkKnvxz
u7OyMkydP4sSJE7CyssKgQYNgbW2Njz/+GHFxcTA3N4e7uzuOHTuG/fv3IykpCaampp1q63kKhQKjR4/GvHnzEBsbC1tbWyQlJaGqqgpRUVF6yzx+/BiTJ0/G/PnzIZVK0dTUhKSkJPTt2xdubm4AgOjoaMjlckyePBnvv/8+bG1t8eDBA5w5cwZeXl4IDg5+oX7KZDIMHDgQERER0Gg0qK+vx5o1azgraABsCLKtW7ciMDAQvXv31hucujPj1vL09ERaWhonbejQoQgKCsLy5cvR2NgIiUSCbdu2oampSW8dZWVluHDhAgghqKysRFxcHBiGweLFizn5fvnlF/D5fLi6urZ3i6jurqtP+lBdS6lUknnz5hFHR0diZmZGxGIxmTlzJrl58yabR9/Jz7NnzxJXV1fSs2dP4uzsTI4cOUIkEglZvny50XIqlYoAIHv27GHTUlNTiaOjIzE3Nyfu7u6ksLBQ5xRpR0+flpSUEC8vL2JpaclpR6PRkHXr1hF7e3vC4/GIk5OTzi+oG6IvNFtxcTEBQE6dOsWm1dbWkgULFhChUEjMzMzImDFjyOnTpw3W1djYSN577z0ilUpJr169iFAoJL6+vqSwsJBT5ubNm2T27NlEJBIRMzMzIpFISEhICOdUb1va05bp6ek61woLC4lcLifm5ubEycmJpKSk6L2/a9euJQMHDiQmJibEwcGBEKL/VGpHxq3PpUuXCADO/zVCnv0fmTt3LrGwsCAikYiEhYWRuLi4dk+fWltbkwkTJpAzZ87otOXv70/mz5/fbp8oiiGEkC6aj6nXSFlZGWQyGXbv3o3Q0NCu7g71f+Ktt95CQEAAVq9e/craUKlU6NevH06cOIF33nnnlbVDvR7opEh1SmRkJEaMGAE7OzvcuXMHGzZsQENDA3777Te9h3MoSp+srCwsWbIE5eXlOidRX5Z169bh9OnTOHny5Cupn3q90GeKVKc0NTVh5cqVUCqV6NWrF3x8fBAXF0cnROqFBAQEoKysDPfv38ebb775StoQCoXYsmXLK6mbev3QlSJFURRFtaJfyaAoiqKoVnRSpCiKoqhWdFKkKIqiqFZ0UqQoiqKoVnRSpCiKoqhWdFKkKIqiqFZ0UqQoiqKoVnRSpCiKoqhW/wPkIo2PnzzY1wAAAABJRU5ErkJggg==", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "variations_table_plot = {k:variations_table[k] for k in variations_table if k in ['time stretching', 'pitch shifting', 'noise']}\n", "fig = benchmark.plot_variations(variations_table_plot, show_diff = True, figsize=(4.6, 4.2));\n", diff --git a/ferret/__init__.py b/ferret/__init__.py index 2fd0143..3fc607b 100644 --- a/ferret/__init__.py +++ b/ferret/__init__.py @@ -4,9 +4,16 @@ __email__ = "giuseppeattanasio6@gmail.com" __version__ = "0.5.0" -from logging import getLogger +import logging -logger = getLogger(__name__) +# create logger +logger = logging.getLogger(__name__) +logger.setLevel(logging.DEBUG) +ch = logging.StreamHandler() +ch.setLevel(logging.DEBUG) +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +ch.setFormatter(formatter) +logger.addHandler(ch) from .benchmark import Benchmark @@ -37,6 +44,18 @@ from .modeling.text_helpers import TokenClassificationHelper +# Check for manual installation of `WhisperX`. +try: + import whisperx +except ImportError as e: + logging.error( + 'Library whisperx not found. Please install it manually from GitHub: ' + '`pip install git+https://github.com/m-bain/whisperx.git`' + ) + + raise e + + # Conditional imports for speech-related tasks try: # Explainers @@ -57,7 +76,12 @@ AOPC_Comprehensiveness_Evaluation_Speech, AOPC_Sufficiency_Evaluation_Speech, ) -except ImportError: - logger.info( - "Speech-related modules could not be imported. It is very likely that ferret was installed in the standard, text-only mode. Run `pip install ferret-xai[speech]` or `pip install ferret-xai[all] to include them." +except ImportError as e: + logger.error( + 'Speech-related modules could not be imported. It is very likely that' + ' ferret was installed in the standard, text-only mode. 
Run ' + '`pip install ferret-xai[speech]` or `pip install ferret-xai[all]` to' + ' include them' ) + + raise e diff --git a/ferret/benchmark_speech.py b/ferret/benchmark_speech.py index 451f089..42e72a7 100644 --- a/ferret/benchmark_speech.py +++ b/ferret/benchmark_speech.py @@ -1,10 +1,8 @@ import numpy as np import pandas as pd -from typing import Dict, List, Union, Tuple -from pydub import AudioSegment +from typing import List, Union, Tuple import torch import seaborn as sns -from IPython.display import display from .explainers.explanation_speech.loo_speech_explainer import LOOSpeechExplainer from .explainers.explanation_speech.gradient_speech_explainer import ( GradientSpeechExplainer, @@ -13,8 +11,7 @@ from .explainers.explanation_speech.paraling_speech_explainer import ( ParalinguisticSpeechExplainer, ) -from .explainers.explanation_speech.explanation_speech import ExplanationSpeech -from .speechxai_utils import pydub_to_np, print_log, FerretAudio +from .speechxai_utils import FerretAudio SCORES_PALETTE = sns.diverging_palette(240, 10, as_cmap=True) @@ -123,7 +120,7 @@ def explain( # TODO UNIFY THE INPUT FORMAT # First things first. We transform any type of input in a suitable numpy array and we proceed with that on. 
- ferret_audio = FerretAudio(audio_path_or_array, native_sr=native_sr, model_helper=self.explainers.model_helper) + ferret_audio = FerretAudio(audio_path_or_array, native_sr=native_sr, model_helper=self.model_helper) ## Get the importance of each class (action, object, location) according to the perturb_paraling type if methodology == "perturb_paraling": diff --git a/ferret/evaluators/faithfulness_measures_speech.py b/ferret/evaluators/faithfulness_measures_speech.py index f8b8107..0d2ae21 100644 --- a/ferret/evaluators/faithfulness_measures_speech.py +++ b/ferret/evaluators/faithfulness_measures_speech.py @@ -78,8 +78,9 @@ def compute_evaluation( # Single probability ground_truth_probs_target = [ground_truth_probs[0][target[0]]] - # Splite the audio into word-level audio segments - from ..explainers.explanation_speech.loo_speech_explainer import transcribe_audio + # TODO: modify to accept a `FerretAudio` object as input. + # Split the audio into word-level audio segments + from ..speechxai_utils import transcribe_audio if words_trascript is None: text, words_trascript = transcribe_audio( @@ -255,8 +256,10 @@ def compute_evaluation( # Single probability ground_truth_probs_target = [ground_truth_probs[0][target[0]]] - # Splite the audio into word-level audio segments - from ..explainers.explanation_speech.loo_speech_explainer import transcribe_audio + # TODO: as above, probably a `FerretAudio` object should we passed as + # input. 
+ # Split the audio into word-level audio segments + from ..speechxai_utils import transcribe_audio if words_trascript is None: text, words_trascript = transcribe_audio( diff --git a/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py b/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py index 6cf8c4f..4e70832 100644 --- a/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py +++ b/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py @@ -5,8 +5,6 @@ import torch from ..explanation_speech import ExplanationSpeech from ....speechxai_utils import pydub_to_np -# TODO - include in utils -from ..loo_speech_explainer import transcribe_audio class GradientEqualWidthSpeechExplainer: diff --git a/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py b/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py index 6a59a3c..e2f5f15 100644 --- a/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py +++ b/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py @@ -2,7 +2,6 @@ from pydub import AudioSegment import numpy as np from ..lime_timeseries import LimeTimeSeriesExplainer -from ..utils_removal import transcribe_audio from ..explanation_speech import ExplanationSpeech from ....speechxai_utils import pydub_to_np diff --git a/ferret/explainers/explanation_speech/lime_speech_explainer.py b/ferret/explainers/explanation_speech/lime_speech_explainer.py index 4a383ac..8fba02e 100644 --- a/ferret/explainers/explanation_speech/lime_speech_explainer.py +++ b/ferret/explainers/explanation_speech/lime_speech_explainer.py @@ -2,9 +2,8 @@ from pydub import AudioSegment import numpy as np from .lime_timeseries import LimeTimeSeriesExplainer -from .utils_removal import transcribe_audio from .explanation_speech import ExplanationSpeech -from ...speechxai_utils import 
pydub_to_np, FerretAudio +from ...speechxai_utils import FerretAudio EMPTY_SPAN = "---" diff --git a/ferret/explainers/explanation_speech/paraling_speech_explainer.py b/ferret/explainers/explanation_speech/paraling_speech_explainer.py index 2f1edc1..6c7bfc2 100644 --- a/ferret/explainers/explanation_speech/paraling_speech_explainer.py +++ b/ferret/explainers/explanation_speech/paraling_speech_explainer.py @@ -14,7 +14,7 @@ PolarityInversion, ) from .explanation_speech import ExplanationSpeech -from ...speechxai_utils import pydub_to_np, print_log +from ...speechxai_utils import pydub_to_np # If True, We use the audiostretchy library to perform time stretching @@ -32,13 +32,13 @@ def _tmp_log1( n_labels, ): if n_labels > 1: - print_log("Target label: ", verbose_target) - print_log("gt", original_gt[verbose_target]) - print_log("m", modified_trg[verbose_target]) + print("Target label: ", verbose_target) + print("gt", original_gt[verbose_target]) + print("m", modified_trg[verbose_target]) else: - print_log("gt", original_gt) - print_log("m", modified_trg) + print("gt", original_gt) + print("m", modified_trg) def _tmp_log2( diff --git a/ferret/explainers/explanation_speech/utils_removal.py b/ferret/explainers/explanation_speech/utils_removal.py index 37249b4..e6744f5 100644 --- a/ferret/explainers/explanation_speech/utils_removal.py +++ b/ferret/explainers/explanation_speech/utils_removal.py @@ -1,9 +1,6 @@ from pydub import AudioSegment -import whisperx import os import numpy as np -from typing import Dict, List, Union, Tuple -from ...speechxai_utils import FerretAudio def remove_specified_words(audio, words, removal_type: str = "nothing"): @@ -52,109 +49,6 @@ def remove_specified_words(audio, words, removal_type: str = "nothing"): return audio_removed -def transcribe_audio( - audio: np.ndarray, - device: str = "cuda", - batch_size: int = 2, - compute_type: str = "float32", - language: str = "en", - model_name_whisper: str = "large-v2", -) -> Tuple[str, List[Dict[str, 
Union[str, float]]]]: - """ - Transcribe audio using whisperx, - and return the text (transcription) and the words with their start and end times. - """ - - ## Load whisperx model - model_whisperx = whisperx.load_model( - model_name_whisper, - device, - compute_type=compute_type, - language=language, - ) - - ## Transcribe audio - # TODO: we are assuming that the array does not come already normalized - # audio_array = audio.normalized_array - # The normalization occurs in the FerretAudio Class - - result = model_whisperx.transcribe(audio, batch_size=batch_size) - model_a, metadata = whisperx.load_align_model( - language_code=result["language"], device=device - ) - - ## Align timestamps - result = whisperx.align( - result["segments"], - model_a, - metadata, - audio, - device, - return_char_alignments=False, - ) - - if result is None or "segments" not in result or len(result["segments"]) == 0: - return "", [] - - if len(result["segments"]) == 1: - text = result["segments"][0]["text"] - words = result["segments"][0]["words"] - else: - text = " ".join( - result["segments"][i]["text"] for i in range(len(result["segments"])) - ) - words = [word for segment in result["segments"] for word in segment["words"]] - - # Remove words that are not properly transcribed - words = [word for word in words if "start" in word] - return text, words - - -def transcribe_audio_given_model( - model_whisperx, - audio_path: str, - batch_size: int = 2, - device: str = "cuda", -) -> Tuple[str, List[Dict[str, Union[str, float]]]]: - """ - Transcribe audio using whisperx, - and return the text (transcription) and the words with their start and end times. 
- """ - - ## Transcribe audio - audio = whisperx.load_audio(audio_path) - result = model_whisperx.transcribe(audio, batch_size=batch_size) - model_a, metadata = whisperx.load_align_model( - language_code=result["language"], device=device - ) - - ## Align timestamps - result = whisperx.align( - result["segments"], - model_a, - metadata, - audio, - device, - return_char_alignments=False, - ) - - if result is None or "segments" not in result or len(result["segments"]) == 0: - return "", [] - - if len(result["segments"]) == 1: - text = result["segments"][0]["text"] - words = result["segments"][0]["words"] - else: - text = " ".join( - result["segments"][i]["text"] for i in range(len(result["segments"])) - ) - words = [word for segment in result["segments"] for word in segment["words"]] - - # Remove words that are not properly transcribed - words = [word for word in words if "start" in word] - return text, words - - def remove_word(audio, word, removal_type: str = "nothing"): """ Remove a word from audio using pydub, by replacing it with: diff --git a/ferret/speechxai_utils.py b/ferret/speechxai_utils.py index 087e1c6..93f3b3b 100644 --- a/ferret/speechxai_utils.py +++ b/ferret/speechxai_utils.py @@ -8,8 +8,114 @@ from datasets import Dataset from transformers import Wav2Vec2ForSequenceClassification, Wav2Vec2FeatureExtractor import librosa -from typing import Union -from explainers.explanation_speech.utils_removal import transcribe_audio +import whisperx +from typing import Dict, List, Union, Tuple + + +def transcribe_audio( + audio: np.ndarray, + device: str = "cuda", + batch_size: int = 2, + compute_type: str = "float32", + language: str = "en", + model_name_whisper: str = "large-v2", +) -> Tuple[str, List[Dict[str, Union[str, float]]]]: + """ + Transcribe audio using whisperx, + and return the text (transcription) and the words with their start and end times. 
+ """ + + ## Load whisperx model + model_whisperx = whisperx.load_model( + model_name_whisper, + device, + compute_type=compute_type, + language=language, + ) + + ## Transcribe audio + # TODO: we are assuming that the array does not come already normalized + # audio_array = audio.normalized_array + # The normalization occurs in the FerretAudio Class + + result = model_whisperx.transcribe( + audio, + batch_size=batch_size + ) + model_a, metadata = whisperx.load_align_model( + language_code=result["language"], device=device + ) + + ## Align timestamps + result = whisperx.align( + result["segments"], + model_a, + metadata, + audio, + device, + return_char_alignments=False, + ) + + if result is None or "segments" not in result or len(result["segments"]) == 0: + return "", [] + + if len(result["segments"]) == 1: + text = result["segments"][0]["text"] + words = result["segments"][0]["words"] + else: + text = " ".join( + result["segments"][i]["text"] for i in range(len(result["segments"])) + ) + words = [word for segment in result["segments"] for word in segment["words"]] + + # Remove words that are not properly transcribed + words = [word for word in words if "start" in word] + return text, words + + +def transcribe_audio_given_model( + model_whisperx, + audio_path: str, + batch_size: int = 2, + device: str = "cuda", +) -> Tuple[str, List[Dict[str, Union[str, float]]]]: + """ + Transcribe audio using whisperx, + and return the text (transcription) and the words with their start and end times. 
+ """ + + ## Transcribe audio + audio = whisperx.load_audio(audio_path) + result = model_whisperx.transcribe(audio, batch_size=batch_size) + model_a, metadata = whisperx.load_align_model( + language_code=result["language"], device=device + ) + + ## Align timestamps + result = whisperx.align( + result["segments"], + model_a, + metadata, + audio, + device, + return_char_alignments=False, + ) + + if result is None or "segments" not in result or len(result["segments"]) == 0: + return "", [] + + if len(result["segments"]) == 1: + text = result["segments"][0]["text"] + words = result["segments"][0]["words"] + else: + text = " ".join( + result["segments"][i]["text"] for i in range(len(result["segments"])) + ) + words = [word for segment in result["segments"] for word in segment["words"]] + + # Remove words that are not properly transcribed + words = [word for word in words if "start" in word] + return text, words class FerretAudio: diff --git a/pyproject.toml b/pyproject.toml index 7673e31..d254af7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,7 +48,13 @@ pydub = { version = "0.25.1", optional = true } audiomentations = { version = "0.34.1", optional = true } audiostretchy = { version = "1.3.5", optional = true } pyroomacoustics = { version = "0.7.3", optional = true } -whisperx = { version = "3.1.2", optional = true } + +# The version of WhisperX currently on PyPI has a problem with a dependency, +# so the dependency needs to be installed from the GitHub repo, which in turns +# prevents it from being used among the extras in pyproject.toml. Until a +# working version of WhisperX is released, the users are required to install +# it from the repo manually. 
+# whisperx = { version = "3.1.2", optional = true } [tool.poetry.extras] speech = [ @@ -56,14 +62,14 @@ speech = [ "audiomentations", "audiostretchy", "pyroomacoustics", - "whisperx" + # "whisperx" ] all = [ "pydub", "audiomentations", "audiostretchy", "pyroomacoustics", - "whisperx" + # "whisperx" ] From 721beb22b161ac2e578c40049fdf3f9651d7a1f9 Mon Sep 17 00:00:00 2001 From: Gaia Geagea Date: Fri, 15 Mar 2024 19:27:06 +0100 Subject: [PATCH 04/21] implementing comments, using FerretAudio homogeneously across explainers, changes to ExplanationSpeech Class --- .../gradient_equal_width_explainer.py | 18 ++++++++---------- .../equal_width/lime_equal_width_explainer.py | 18 ++++++++---------- .../equal_width/loo_equal_width_explainer.py | 18 +++++++----------- .../explanation_speech/explanation_speech.py | 3 ++- .../gradient_speech_explainer.py | 8 ++++---- .../lime_speech_explainer.py | 7 +++---- .../explanation_speech/loo_speech_explainer.py | 2 +- ferret/speechxai_utils.py | 10 +++++----- 8 files changed, 38 insertions(+), 46 deletions(-) diff --git a/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py b/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py index 4e70832..f264923 100644 --- a/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py +++ b/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py @@ -4,7 +4,7 @@ import numpy as np import torch from ..explanation_speech import ExplanationSpeech -from ....speechxai_utils import pydub_to_np +from ....speechxai_utils import pydub_to_np, FerretAudio class GradientEqualWidthSpeechExplainer: @@ -56,7 +56,7 @@ def _get_gradient_importance_frame_level( def compute_explanation( self, - audio_path: str, + audio: FerretAudio, target_class=None, aggregation: str = "mean", num_s_split: float = 0.25, @@ -64,7 +64,7 @@ def compute_explanation( """ Compute the word-level explanation for the given audio. 
Args: - audio_path: path to the audio file + audio: An instance of the FerretAudio class containing the input audio data. target_class: target class - int - If None, use the predicted class no_before_span: if True, it also consider the span before the word. This is because we observe gradient give importance also for the frame just before the word aggregation: aggregation method for the frames of the word. Can be "mean" or "max" @@ -76,12 +76,10 @@ def compute_explanation( "Aggregation method not supported, choose between 'mean' and 'max'" ) - # Load audio and convert to np.array - audio_as = AudioSegment.from_wav(audio_path) - audio = pydub_to_np(audio_as)[0] + audio_array = audio.array # Predict logits/probabilities - logits_original = self.model_helper.predict([audio]) + logits_original = self.model_helper.predict([audio_array]) # Check if single label or multilabel scenario as for FSC n_labels = self.model_helper.n_labels @@ -106,7 +104,7 @@ def compute_explanation( for target_label, target_class in enumerate(targets): # Get gradient importance for each frame attr = self._get_gradient_importance_frame_level( - audio, target_class, target_label + audio_array, target_class, target_label ) old_start = 0 @@ -115,7 +113,7 @@ def compute_explanation( importances = [] a, b = 0, 0 # 50, 20 - duration_s = len(audio_as) / 1000 + duration_s = len(audio_array) / audio.sample_rate # finds the duration from the array a, b = 0, 0 for e, i in enumerate(np.arange(0, duration_s, num_s_split)): @@ -158,7 +156,7 @@ def compute_explanation( scores=scores, explainer=self.NAME + "-" + aggregation, target=targets if n_labels > 1 else targets, - audio_path=audio_path, + audio=audio, ) return explanation \ No newline at end of file diff --git a/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py b/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py index e2f5f15..ac369c5 100644 --- 
a/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py +++ b/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py @@ -3,7 +3,7 @@ import numpy as np from ..lime_timeseries import LimeTimeSeriesExplainer from ..explanation_speech import ExplanationSpeech -from ....speechxai_utils import pydub_to_np +from ....speechxai_utils import pydub_to_np, FerretAudio EMPTY_SPAN = "---" @@ -16,7 +16,7 @@ def __init__(self, model_helper): def compute_explanation( self, - audio_path: str, + audio: FerretAudio, target_class=None, removal_type: str = "silence", num_samples: int = 1000, @@ -24,7 +24,7 @@ def compute_explanation( ) -> ExplanationSpeech: """ Compute the word-level explanation for the given audio. - audio_path: path to the audio file + audio: An instance of the FerretAudio class containing the input audio data. target_class: target class - int - If None, use the predicted class removal_type: """ @@ -34,12 +34,10 @@ def compute_explanation( "Removal method not supported, choose between 'silence' and 'noise'" ) - # Load audio and convert to np.array - audio_as = AudioSegment.from_wav(audio_path) - audio = pydub_to_np(audio_as)[0] + audio_array = audio.array # Predict logits/probabilities - logits_original = self.model_helper.predict([audio]) + logits_original = self.model_helper.predict([audio_array]) # Check if single label or multilabel scenario as for FSC n_labels = self.model_helper.n_labels @@ -58,13 +56,13 @@ def compute_explanation( else: targets = [int(np.argmax(logits_original, axis=1)[0])] - audio_np = audio.reshape(1, -1) + audio_np = audio_array.reshape(1, -1) # Get the start and end indexes of the segments. 
These will be used to split the audio and derive LIME interpretable features sampling_rate = self.model_helper.feature_extractor.sampling_rate splits = [] - duration_s = len(audio_as) / 1000 + duration_s = len(audio_array) / audio.sample_rate # finds the duration from the array a, b = 0, 0 for e, i in enumerate(np.arange(0, duration_s, num_s_split)): @@ -135,7 +133,7 @@ def compute_explanation( scores=scores, explainer=self.NAME + "+" + removal_type, target=targets if n_labels > 1 else targets, - audio_path=audio_path, + audio=audio, ) return explanation \ No newline at end of file diff --git a/ferret/explainers/explanation_speech/equal_width/loo_equal_width_explainer.py b/ferret/explainers/explanation_speech/equal_width/loo_equal_width_explainer.py index a560a0c..1754f21 100644 --- a/ferret/explainers/explanation_speech/equal_width/loo_equal_width_explainer.py +++ b/ferret/explainers/explanation_speech/equal_width/loo_equal_width_explainer.py @@ -6,7 +6,7 @@ from pydub import AudioSegment from IPython.display import display from ..explanation_speech import ExplanationSpeech -from ....speechxai_utils import pydub_to_np, print_log +from ....speechxai_utils import pydub_to_np, print_log, FerretAudio def remove_audio_segment(audio, start_s, end_s, removal_type: str = "silence"): @@ -45,8 +45,6 @@ def remove_audio_segment(audio, start_s, end_s, removal_type: str = "silence"): audio_removed = before_word_audio + replace_word_audio + after_word_audio return audio_removed - - class LOOSpeechEqualWidthExplainer: NAME = "loo_speech_equal_width" @@ -55,7 +53,7 @@ def __init__(self, model_helper): def compute_explanation( self, - audio_path: str, + audio: FerretAudio, target_class=None, removal_type: str = "silence", num_s_split: float = 0.25, @@ -65,19 +63,17 @@ def compute_explanation( Computes the importance of each equal width audio segment in the audio. 
""" - ## Load audio as pydub.AudioSegment - audio = AudioSegment.from_wav(audio_path) - audio_np = pydub_to_np(audio)[0] + audio_array = audio.array ## Remove word audio_remove_segments = [] - duration_s = len(audio) / 1000 + duration_s = len(audio_array) / audio.sample_rate # finds the duration from the array for i in np.arange(0, duration_s, num_s_split): start_s = i end_s = min(i + num_s_split, duration_s) - audio_removed = remove_audio_segment(audio, start_s, end_s, removal_type) + audio_removed = remove_audio_segment(audio.to_pydub(), start_s, end_s, removal_type) audio_remove_segments.append(pydub_to_np(audio_removed)[0]) @@ -86,7 +82,7 @@ def compute_explanation( display(audio_removed) # Get original logits - logits_original = self.model_helper.predict([audio_np]) + logits_original = self.model_helper.predict([audio_array]) # Get logits for the modified audio by leaving out the equal width segments logits_modified = self.model_helper.predict(audio_remove_segments) @@ -137,7 +133,7 @@ def compute_explanation( scores=scores, explainer=self.NAME + "+" + removal_type, target=targets if n_labels > 1 else [targets], - audio_path=audio_path, + audio=audio, ) return explanation \ No newline at end of file diff --git a/ferret/explainers/explanation_speech/explanation_speech.py b/ferret/explainers/explanation_speech/explanation_speech.py index 139fe38..cdcd905 100644 --- a/ferret/explainers/explanation_speech/explanation_speech.py +++ b/ferret/explainers/explanation_speech/explanation_speech.py @@ -1,6 +1,7 @@ from dataclasses import dataclass import numpy as np from typing import Optional +from ...speechxai_utils import FerretAudio @dataclass @@ -9,7 +10,7 @@ class ExplanationSpeech: scores: np.array explainer: str target: list - audio_path: Optional[str] = None + audio: FerretAudio @dataclass diff --git a/ferret/explainers/explanation_speech/gradient_speech_explainer.py b/ferret/explainers/explanation_speech/gradient_speech_explainer.py index f1cc22b..f05b181 100644 
--- a/ferret/explainers/explanation_speech/gradient_speech_explainer.py +++ b/ferret/explainers/explanation_speech/gradient_speech_explainer.py @@ -65,7 +65,7 @@ def compute_explanation( """ Compute the word-level explanation for the given audio. Args: - audio_path: path to the audio file + audio: An instance of the FerretAudio class containing the input audio data. target_class: target class - int - If None, use the predicted class no_before_span: if True, it also consider the span before the word. This is because we observe gradient give importance also for the frame just before the word aggregation: aggregation method for the frames of the word. Can be "mean" or "max" @@ -101,7 +101,7 @@ def compute_explanation( if words_trascript is None: # Transcribe audio - words_trascript = audio.transcribe + words_trascript = audio.transcription # Compute gradient importance for each target label # This also handles the multilabel scenario as for FSC @@ -109,7 +109,7 @@ def compute_explanation( for target_label, target_class in enumerate(targets): # Get gradient importance for each frame attr = self._get_gradient_importance_frame_level( - audio, target_class, target_label + audio_array, target_class, target_label ) old_start = 0 @@ -177,7 +177,7 @@ def compute_explanation( scores=scores, explainer=self.NAME + "-" + aggregation, target=targets if n_labels > 1 else targets, - audio_path=audio, + audio=audio, ) return explanation \ No newline at end of file diff --git a/ferret/explainers/explanation_speech/lime_speech_explainer.py b/ferret/explainers/explanation_speech/lime_speech_explainer.py index 8fba02e..afeec59 100644 --- a/ferret/explainers/explanation_speech/lime_speech_explainer.py +++ b/ferret/explainers/explanation_speech/lime_speech_explainer.py @@ -25,7 +25,7 @@ def compute_explanation( """ Compute the word-level explanation for the given audio. Args: - audio_path: path to the audio file + audio: An instance of the FerretAudio class containing the input audio data. 
target_class: target class - int - If None, use the predicted class removal_type: """ @@ -35,7 +35,6 @@ def compute_explanation( "Removal method not supported, choose between 'silence' and 'noise'" ) - # Load audio and convert to np.array audio_array = audio.array # Predict logits/probabilities logits_original = self.model_helper.predict([audio_array]) @@ -59,7 +58,7 @@ def compute_explanation( if words_trascript is None: # Transcribe audio - words_trascript = audio.transcribe + words_trascript = audio.transcription audio_np = audio_array.reshape(1, -1) # Get the start and end indexes of the words. These will be used to split the audio and derive LIME interpretable features @@ -139,7 +138,7 @@ def compute_explanation( scores=scores, explainer=self.NAME + "+" + removal_type, target=targets if n_labels > 1 else targets, - audio_path=audio, + audio=audio, ) return explanation \ No newline at end of file diff --git a/ferret/explainers/explanation_speech/loo_speech_explainer.py b/ferret/explainers/explanation_speech/loo_speech_explainer.py index 9b1a704..a10bef7 100644 --- a/ferret/explainers/explanation_speech/loo_speech_explainer.py +++ b/ferret/explainers/explanation_speech/loo_speech_explainer.py @@ -37,7 +37,7 @@ def remove_words( # TODO GA: transcribing audio might be an operation need by other explainers. 
I suggest we move it into FerretAudio or somewhere else such that can be done once and then shared (e.g., a method in the SpeechBenchmark class) # transcription moved to the FerretAudio Class if words_trascript is None: - words_trascript = audio.transcribe + words_trascript = audio.transcription ## Load audio as pydub.AudioSegment pydub_segment = audio.to_pydub() diff --git a/ferret/speechxai_utils.py b/ferret/speechxai_utils.py index 93f3b3b..f48f4c9 100644 --- a/ferret/speechxai_utils.py +++ b/ferret/speechxai_utils.py @@ -134,7 +134,7 @@ def __init__( self.native_sr = native_sr self.audio_path_or_array = audio_path_or_array self.model_helper = model_helper - self._transcribe = None + self._transcription = None if isinstance(audio_path_or_array, str): self.native_sr = librosa.get_samplerate(audio_path_or_array) @@ -164,10 +164,10 @@ def normalized_array(self) -> np.ndarray: return self.array @property - def transcribe(self): - if self._transcribe is None: + def transcription(self): + if self._transcription is None: if self.model_helper and hasattr(self.model_helper, 'device') and hasattr(self.model_helper, 'language'): - _ , self._transcribe = transcribe_audio( + _ , self._transcription = transcribe_audio( audio=self.normalized_array, # is normalization needed when transcribing? 
i am assumimg so device=self.model_helper.device.type, batch_size=2, @@ -176,7 +176,7 @@ def transcribe(self): ) else: raise AttributeError("model_helper is not correctly configured") - return self._transcribe + return self._transcription def to_pydub(self) -> pydub.AudioSegment: """ From d550900df3d453306e9937def7243f333363d25c Mon Sep 17 00:00:00 2001 From: emanuele-moscato Date: Mon, 18 Mar 2024 11:41:22 +0100 Subject: [PATCH 05/21] Adapt faithfulness measures for speech to use FerretAudio objects, fix notebook --- examples/speech/getting_started.ipynb | 2 +- .../faithfulness_measures_speech.py | 38 ++++++------------- 2 files changed, 13 insertions(+), 27 deletions(-) diff --git a/examples/speech/getting_started.ipynb b/examples/speech/getting_started.ipynb index b8503e2..a4e62d1 100644 --- a/examples/speech/getting_started.ipynb +++ b/examples/speech/getting_started.ipynb @@ -178,7 +178,7 @@ "outputs": [], "source": [ "explanation = benchmark.explain(\n", - " audio_path=audio_path, \n", + " audio_path_or_array=audio_path, \n", " methodology='LIME'\n", ")\n", "\n", diff --git a/ferret/evaluators/faithfulness_measures_speech.py b/ferret/evaluators/faithfulness_measures_speech.py index 0d2ae21..e04043f 100644 --- a/ferret/evaluators/faithfulness_measures_speech.py +++ b/ferret/evaluators/faithfulness_measures_speech.py @@ -57,13 +57,10 @@ def compute_evaluation( 'The "target" argument is deprecated and will be removed in a future version. The explanation target are used as default.' ) - audio_path = explanation.audio_path - target = explanation.target - # Get the audio from audio_path - audio = AudioSegment.from_wav(audio_path) - audio_np = pydub_to_np(audio)[0] + # Get audio as array. 
+ audio_np = explanation.audio.array # Get prediction probability of the input sencence for the target ground_truth_probs = self.model_helper.predict([audio_np]) @@ -83,13 +80,7 @@ def compute_evaluation( from ..speechxai_utils import transcribe_audio if words_trascript is None: - text, words_trascript = transcribe_audio( - audio_path=audio_path, - device=self.model_helper.device.type, - batch_size=2, - compute_type="float32", - language=self.model_helper.language, - ) + words_trascript = explanation.audio.transcription get_discrete_rationale_function = ( _check_and_define_get_id_discrete_rationale_function( @@ -152,7 +143,9 @@ def compute_evaluation( words_removed = [words_trascript[i] for i in id_top] audio_removed = remove_specified_words( - audio, words_removed, removal_type=removal_type + explanation.audio.to_pydub(), + words_removed, + removal_type=removal_type ) audio_removed_np = pydub_to_np(audio_removed)[0] @@ -235,13 +228,10 @@ def compute_evaluation( 'The "target" argument is deprecated and will be removed in a future version. The explanation target are used as default.' ) - audio_path = explanation.audio_path - target = explanation.target - # Get the audio from audio_path - audio = AudioSegment.from_wav(audio_path) - audio_np = pydub_to_np(audio)[0] + # Get audio as an array. 
+ audio_np = explanation.audio.array # Get prediction probability of the input sencence for the target ground_truth_probs = self.model_helper.predict([audio_np]) @@ -262,13 +252,7 @@ def compute_evaluation( from ..speechxai_utils import transcribe_audio if words_trascript is None: - text, words_trascript = transcribe_audio( - audio_path=audio_path, - device=self.model_helper.device.type, - batch_size=2, - compute_type="float32", - language=self.model_helper.language, - ) + words_trascript = explanation.audio.transcription get_discrete_rationale_function = ( _check_and_define_get_id_discrete_rationale_function( @@ -335,7 +319,9 @@ def compute_evaluation( ] audio_removed = remove_specified_words( - audio, words_removed, removal_type=removal_type + explanation.audio.to_pydub(), + words_removed, + removal_type=removal_type ) audio_removed_np = pydub_to_np(audio_removed)[0] From 3eea1bde90e115dfdf5639251aef9dff04883f20 Mon Sep 17 00:00:00 2001 From: emanuele-moscato Date: Tue, 19 Mar 2024 12:55:54 +0100 Subject: [PATCH 06/21] Fix conversion of array to pydub type --- ferret/speechxai_utils.py | 44 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 3 deletions(-) diff --git a/ferret/speechxai_utils.py b/ferret/speechxai_utils.py index f48f4c9..9c07a31 100644 --- a/ferret/speechxai_utils.py +++ b/ferret/speechxai_utils.py @@ -138,6 +138,8 @@ def __init__( if isinstance(audio_path_or_array, str): self.native_sr = librosa.get_samplerate(audio_path_or_array) + + # Note: by default, librosa returns an array normalized in [-1,1]. 
self.array, self.sample_rate = librosa.load( audio_path_or_array, sr=self.target_sr, dtype=np.float32 ) @@ -178,14 +180,50 @@ def transcription(self): raise AttributeError("model_helper is not correctly configured") return self._transcription + @staticmethod + def unnormalize_array(arr, dtype=np.int16): + """ + Given a NumPy array normalized in `[-1, 1]`, returns an array rescaled + in `[-max, max]`, where `max` is the maximum (in absolute value) + (integer) number representable by the selected `dtype`. In practice, + we convert a normalized array of dtype `float32` into a normalized + one of dtype `int16`, as needed to create a PyDub `AudioSegment` + object. + """ + max_val = np.maximum( + np.iinfo(dtype).max, + np.abs(np.iinfo(dtype).min) + ) + + return (arr * max_val).astype(dtype) + def to_pydub(self) -> pydub.AudioSegment: """ - Converts audio to pydub.AudioSegment. + Converts audio to `pydub.AudioSegment`. + + Notes: + * In order to convert to PyDub `AudioSegment` type we need the + array to be + * of dtype int16, + * NOT normalized. + Therefore, if the array is normalized, we unnormalize it. + * In any case, PyDub only works with unnormalized arrays of dtype + int16, so that's what we need to pass as the input to + `AudioSegment`. + * Because we only manipulate mono audio, the array can either have + shape `(n_samples, 1)` or `(n_samples,)` (flat array). Either is + fine for PyDub (the extra dimension is taken care of + automatically for mono audio). 
""" + if self.is_normalized: + unnormalized_array = self.unnormalize_array(self.array) + else: + unnormalized_array = self.array + return pydub.AudioSegment( - self.array.tobytes(), + unnormalized_array.tobytes(), frame_rate=self.target_sr, - sample_width=self.array.dtype.itemsize, + sample_width=unnormalized_array.dtype.itemsize, channels=1, ) From 87e9076cf99e2471817bbca4b6d04acbe31f0b2a Mon Sep 17 00:00:00 2001 From: Giuseppe Attanasio Date: Tue, 19 Mar 2024 19:55:25 +0000 Subject: [PATCH 07/21] - Add a new "transcribe" method - if word timestamps are not provided they are generated on the fly - each word timestamps expects word transcripts - word timestamps are not external to the FerretAudio class - add a new notebook to show this behavior --- examples/speech/new_notebook.ipynb | 546 ++++++++++++++++++ ferret/benchmark_speech.py | 67 ++- .../faithfulness_measures_speech.py | 26 +- .../explanation_speech/explanation_speech.py | 5 +- .../gradient_speech_explainer.py | 17 +- .../lime_speech_explainer.py | 17 +- .../loo_speech_explainer.py | 19 +- ferret/speechxai_utils.py | 267 ++++----- 8 files changed, 782 insertions(+), 182 deletions(-) create mode 100644 examples/speech/new_notebook.ipynb diff --git a/examples/speech/new_notebook.ipynb b/examples/speech/new_notebook.ipynb new file mode 100644 index 0000000..3a4b0ae --- /dev/null +++ b/examples/speech/new_notebook.ipynb @@ -0,0 +1,546 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Speech XAI" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/giuseppe/miniconda3/envs/ferret_0.5.0/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n", + "/home/giuseppe/miniconda3/envs/ferret_0.5.0/lib/python3.9/site-packages/pydub/utils.py:170: RuntimeWarning: Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work\n", + " warn(\"Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work\", RuntimeWarning)\n", + "/home/giuseppe/miniconda3/envs/ferret_0.5.0/lib/python3.9/site-packages/pyannote/audio/core/io.py:43: UserWarning: torchaudio._backend.set_audio_backend has been deprecated. With dispatcher enabled, this function is no-op. You can remove the function call.\n", + " torchaudio.set_audio_backend(\"soundfile\")\n", + "torchvision is not available - cannot save figures\n" + ] + } + ], + "source": [ + "from datasets import Dataset, load_dataset\n", + "from IPython.display import display\n", + "import numpy as np \n", + "import os\n", + "import pandas as pd\n", + "from pathlib import Path\n", + "from pydub import AudioSegment\n", + "import torch\n", + "from transformers import Wav2Vec2ForSequenceClassification, Wav2Vec2FeatureExtractor\n", + "\n", + "from ferret import SpeechBenchmark, AOPC_Comprehensiveness_Evaluation_Speech, AOPC_Sufficiency_Evaluation_Speech" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "DATASET_ID = \"DynamicSuperb/IntentClassification_FluentSpeechCommands-Action\"" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "cuda:2\n" + ] + } + ], + "source": [ + "device = 'cuda:2' if torch.cuda.is_available() else 'cpu'\n", + "print(device)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Data" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Dataset({\n", + " 
features: ['file', 'speakerId', 'transcription', 'audio', 'label', 'instruction'],\n", + " num_rows: 200\n", + "})" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "data = load_dataset(DATASET_ID, split=\"test\")\n", + "data" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'file': 'wavs/speakers/Xygv5loxdZtrywr9/77506ae0-452b-11e9-a843-8db76f4b5e29.wav',\n", + " 'speakerId': 'Xygv5loxdZtrywr9',\n", + " 'transcription': 'Increase the temperature in the washroom',\n", + " 'audio': {'path': '77506ae0-452b-11e9-a843-8db76f4b5e29.wav',\n", + " 'array': array([0. , 0. , 0. , ..., 0.02133179, 0.01977539,\n", + " 0.01849365]),\n", + " 'sampling_rate': 16000},\n", + " 'label': 'increase',\n", + " 'instruction': 'Recognize the action behind the verbal expression. The answer could be activate, bring, change language, deactivate, decrease, or increase.'}" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "sample = data[0]\n", + "sample" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this notebook we are using Wav2Vec2 which expects audio arrays to be in 16kHz. Luckly, this is the native sampling rate of our data. 
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Models" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at superb/wav2vec2-base-superb-ic were not used when initializing Wav2Vec2ForSequenceClassification: ['wav2vec2.encoder.pos_conv_embed.conv.weight_g', 'wav2vec2.encoder.pos_conv_embed.conv.weight_v']\n", + "- This IS expected if you are initializing Wav2Vec2ForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing Wav2Vec2ForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of Wav2Vec2ForSequenceClassification were not initialized from the model checkpoint at superb/wav2vec2-base-superb-ic and are newly initialized: ['wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original0', 'wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original1']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" + ] + } + ], + "source": [ + "## Load model\n", + "model = Wav2Vec2ForSequenceClassification.from_pretrained(\n", + " \"superb/wav2vec2-base-superb-ic\"\n", + ")\n", + "feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(\n", + " \"superb/wav2vec2-base-superb-ic\"\n", + ")\n", + "\n", + "if torch.cuda.is_available():\n", + " model = model.to(device)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Speech-XAI: the `SpeechBenchmark` class" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note: if 
not specified otherwise, `SpeechBenchmark` assumes English as the source language." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "## Instantiate benchmark class\n", + "benchmark = SpeechBenchmark(model, feature_extractor, device=device)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's start from transcribing the example above using WhisperX." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.1. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../.cache/torch/whisperx-vad-segmentation.bin`\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", + "Model was trained with torch 1.10.0+cu102, yours is 2.2.1+cu121. 
Bad things might happen unless you revert torch to 1.x.\n" + ] + }, + { + "data": { + "text/plain": [ + "(' Increase the temperature in the washroom.',\n", + " [{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438},\n", + " {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141},\n", + " {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444},\n", + " {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848},\n", + " {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953},\n", + " {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "text, word_timestamps = benchmark.transcribe(\n", + " sample[\"audio\"][\"array\"],\n", + " current_sr=sample[\"audio\"][\"sampling_rate\"],\n", + ")\n", + "text, word_timestamps" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Explain word importance" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Word importance" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ExplanationSpeech(features=['Increase', 'the', 'temperature', 'in', 'the', 'washroom.'], scores=array([[ 0.47325948, -0.45515063, -0.10200211, -0.15734437, -0.12148061,\n", + " 0.0109534 ],\n", + " [ 0.07733697, -0.02064097, 0.34651279, -0.01588559, -0.01463729,\n", + " -0.02365428],\n", + " [-0.01432282, -0.01848161, -0.00988954, -0.00070852, -0.01123005,\n", + " 0.32860303]]), explainer='loo_speech+silence', target=[3, 4, 3], audio=)\n" + ] + } + ], + "source": [ + "explanation = benchmark.explain(\n", + " audio_path_or_array=sample[\"audio\"][\"array\"],\n", + " current_sr=sample[\"audio\"][\"sampling_rate\"],\n", + " methodology='LOO',\n", + " word_timestamps=word_timestamps\n", + ")\n", + "# display(benchmark.show_table(explanation, 
decimals=3))\n", + "print(explanation)" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ExplanationSpeech(features=['Increase', 'the', 'temperature', 'in', 'the', 'washroom.'], scores=array([[ 0.30518979, -0.05905298, 0.02406042, 0.06312685, -0.01027066,\n", + " 0.00634839],\n", + " [-0.00192933, 0.04791304, 0.30365684, 0.01351917, -0.02577572,\n", + " 0.13388124],\n", + " [ 0.07868745, -0.02967894, 0.21510287, 0.02970933, 0.03952176,\n", + " 0.44306288]]), explainer='LIME+silence', target=[3, 4, 3], audio=)\n" + ] + } + ], + "source": [ + "explanation = benchmark.explain(\n", + " audio_path_or_array=sample[\"audio\"][\"array\"],\n", + " current_sr=sample[\"audio\"][\"sampling_rate\"], \n", + " methodology='LIME',\n", + " word_timestamps=word_timestamps\n", + ")\n", + "print(explanation)\n", + "#display(benchmark.show_table(explanation, decimals=3))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can run the same function but with no word timestamps. The class will generate them automatically." + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "int" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "type(sample[\"audio\"][\"sampling_rate\"])" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Transcribing audio to get word level timestamps...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.1. 
To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../.cache/torch/whisperx-vad-segmentation.bin`\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", + "Model was trained with torch 1.10.0+cu102, yours is 2.2.1+cu121. Bad things might happen unless you revert torch to 1.x.\n", + "Transcribed audio with whisperX into: Increase the temperature in the washroom.\n", + "ExplanationSpeech(features=['Increase', 'the', 'temperature', 'in', 'the', 'washroom.'], scores=array([[ 2.73476301e-01, -2.75996308e-02, 2.68968859e-02,\n", + " 4.38230033e-02, -9.83693653e-03, 3.43606501e-02],\n", + " [-4.55664511e-02, 2.00727565e-04, 3.07805104e-01,\n", + " -7.30904579e-03, 8.18154319e-03, 1.45066594e-01],\n", + " [ 7.67946057e-02, -1.63121582e-02, 1.69544374e-01,\n", + " 1.03233484e-02, 6.95427995e-02, 4.02942428e-01]]), explainer='LIME+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" + ] + } + ], + "source": [ + "explanation = benchmark.explain(\n", + " audio_path_or_array=sample[\"audio\"][\"array\"],\n", + " current_sr=sample[\"audio\"][\"sampling_rate\"], \n", + " methodology='LIME',\n", + ")\n", + "print(explanation)\n", + "#display(benchmark.show_table(explanation, decimals=3))" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(EvaluationSpeech(name='aopc_compr_speech', 
score=[0.32901989901438355, 0.4174739196896553, 0.5148161690682173], target=[3, 4, 3]),\n", + " EvaluationSpeech(name='aopc_suff', score=[0.17665663920342922, -0.009631142020225525, -0.01769007444381714], target=[3, 4, 3]))" + ] + }, + "execution_count": 30, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aopc_compr = AOPC_Comprehensiveness_Evaluation_Speech(benchmark.model_helper)\n", + "evaluation_output_c = aopc_compr.compute_evaluation(explanation)\n", + "\n", + "aopc_suff = AOPC_Sufficiency_Evaluation_Speech(benchmark.model_helper)\n", + "evaluation_output_s = aopc_suff.compute_evaluation(explanation)\n", + "\n", + "evaluation_output_c, evaluation_output_s" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Explain paralinguistic impact" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Transcribing audio to get word level timestamps...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.1. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../.cache/torch/whisperx-vad-segmentation.bin`\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", + "Model was trained with torch 1.10.0+cu102, yours is 2.2.1+cu121. 
Bad things might happen unless you revert torch to 1.x.\n", + "Transcribed audio with whisperX into: Increase the temperature in the washroom.\n" + ] + }, + { + "ename": "TypeError", + "evalue": "compute_explanation() got an unexpected keyword argument 'audio'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[32], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m explain_table \u001b[38;5;241m=\u001b[39m \u001b[43mbenchmark\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mexplain\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2\u001b[0m \u001b[43m \u001b[49m\u001b[43maudio_path_or_array\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msample\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43maudio\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43marray\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3\u001b[0m \u001b[43m \u001b[49m\u001b[43mcurrent_sr\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msample\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43maudio\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43msampling_rate\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4\u001b[0m \u001b[43m \u001b[49m\u001b[43mmethodology\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mperturb_paraling\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 5\u001b[0m \u001b[43m)\u001b[49m\n\u001b[1;32m 6\u001b[0m 
display(benchmark\u001b[38;5;241m.\u001b[39mshow_table(explain_table, decimals\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m2\u001b[39m))\n", + "File \u001b[0;32m~/ferret/ferret/benchmark_speech.py:179\u001b[0m, in \u001b[0;36mSpeechBenchmark.explain\u001b[0;34m(self, audio_path_or_array, current_sr, target_class, methodology, perturbation_types, removal_type, aggregation, num_samples, word_timestamps, verbose, verbose_target)\u001b[0m\n\u001b[1;32m 177\u001b[0m explainer \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mexplainers[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mperturb_paraling\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n\u001b[1;32m 178\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m perturbation_type \u001b[38;5;129;01min\u001b[39;00m perturbation_types:\n\u001b[0;32m--> 179\u001b[0m explanation \u001b[38;5;241m=\u001b[39m \u001b[43mexplainer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcompute_explanation\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 180\u001b[0m \u001b[43m \u001b[49m\u001b[43maudio\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mferret_audio\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 181\u001b[0m \u001b[43m \u001b[49m\u001b[43mtarget_class\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtarget_class\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 182\u001b[0m \u001b[43m \u001b[49m\u001b[43mperturbation_type\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mperturbation_type\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 183\u001b[0m \u001b[43m \u001b[49m\u001b[43mverbose\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverbose\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 184\u001b[0m \u001b[43m \u001b[49m\u001b[43mverbose_target\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverbose_target\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 185\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 186\u001b[0m explanations\u001b[38;5;241m.\u001b[39mappend(explanation)\n\u001b[1;32m 
188\u001b[0m \u001b[38;5;66;03m# table = self.create_table(importances)\u001b[39;00m\n\u001b[1;32m 189\u001b[0m \u001b[38;5;66;03m## Get the importance of each word\u001b[39;00m\n\u001b[1;32m 190\u001b[0m \u001b[38;5;66;03m# elif:\u001b[39;00m\n\u001b[1;32m 191\u001b[0m \n\u001b[1;32m 192\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n", + "\u001b[0;31mTypeError\u001b[0m: compute_explanation() got an unexpected keyword argument 'audio'" + ] + } + ], + "source": [ + "explain_table = benchmark.explain(\n", + " audio_path_or_array=sample[\"audio\"][\"array\"],\n", + " current_sr=sample[\"audio\"][\"sampling_rate\"],\n", + " methodology='perturb_paraling',\n", + ")\n", + "display(benchmark.show_table(explain_table, decimals=2))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Show variation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "perturbation_types = ['time stretching', 'pitch shifting', 'reverberation', 'noise']\n", + "variations_table = benchmark.explain_variations(\n", + " audio_path=audio_path,\n", + " perturbation_types=perturbation_types\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "variations_table_plot = {k:variations_table[k] for k in variations_table if k in ['time stretching', 'pitch shifting', 'noise']}\n", + "fig = benchmark.plot_variations(variations_table_plot, show_diff = True, figsize=(4.6, 4.2));\n", + "# fig.savefig(f'example_{dataset_name}_context.pdf', bbox_inches='tight')" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "name": "SUPERB - IC Task (FSC).ipynb", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": 
"python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.0" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/ferret/benchmark_speech.py b/ferret/benchmark_speech.py index 42e72a7..3979a46 100644 --- a/ferret/benchmark_speech.py +++ b/ferret/benchmark_speech.py @@ -1,6 +1,6 @@ import numpy as np import pandas as pd -from typing import List, Union, Tuple +from typing import List, Union, Tuple, Optional import torch import seaborn as sns from .explainers.explanation_speech.loo_speech_explainer import LOOSpeechExplainer @@ -11,7 +11,7 @@ from .explainers.explanation_speech.paraling_speech_explainer import ( ParalinguisticSpeechExplainer, ) -from .speechxai_utils import FerretAudio +from .speechxai_utils import FerretAudio, transcribe_audio SCORES_PALETTE = sns.diverging_palette(240, 10, as_cmap=True) @@ -31,14 +31,14 @@ def __init__( self, model, feature_extractor, - device: str = "cuda:0", + device: str = "cpu", language: str = "en", explainers=None, ): self.model = model self.feature_extractor = feature_extractor self.model.eval() - self.device = torch.device(device if torch.cuda.is_available() else "cpu") + self.device = torch.device(device) self.language = language if "superb-ic" in self.model.name_or_path: @@ -89,10 +89,51 @@ def predict( # Just a wrapper around ModelHelperFSC.predict/ModelHelperFSC.predict_single We use the second to overcome the padding issue return self.model_helper.predict(audios) + def _transcribe(self, **transcription_args): + transcription_output = transcribe_audio(**transcription_args) + return transcription_output + + def transcribe( + self, + audio_path_or_array: Union[str, np.ndarray], + current_sr: Optional[int] = None, + batch_size: Optional[int] = 1, + compute_type: Optional[str] = "float32", + model_name_whisper: Optional[str] = "large-v2", + ): + """ + Transcribe the audio and return the transcription. 
+ + Args: + audio_path_or_array: path to the audio file or numpy array with the audio data. + language: language of the audio + current_sr: current sample rate of the audio + batch_size: batch size for the transcription + compute_type: the type of the input data for the model + model_name_whisper: the name of the model to use for the transcription + + Returns: + (text, word_transcripts) + """ + # we do this to introduce sanity checks on the audio + audio = FerretAudio(audio_path_or_array, current_sr=current_sr) + if audio.current_sr != 16_000: + audio.resample(16_000) # this is required by WhisperX + + transcription_output = self._transcribe( + audio=audio.normalized_array, + language=self.language, + batch_size=batch_size, + compute_type=compute_type, + model_name_whisper=model_name_whisper, + device=self.device, + ) + return transcription_output + def explain( self, audio_path_or_array: Union[str, np.ndarray], - native_sr: int = None, + current_sr: int = None, target_class: str = None, methodology: str = "LOO", perturbation_types: List[str] = [ @@ -108,7 +149,7 @@ def explain( removal_type: str = "silence", # Used only for LOO and LIME - explainer_args TODO aggregation: str = "mean", # Used only for Gradient and GradientXInput - explainer_args TODO num_samples: int = 1000, # Used only for LIME - explainer_args TODO - words_trascript: List = None, + word_timestamps: List = None, verbose: bool = False, verbose_target: int = 0, ): @@ -119,8 +160,16 @@ def explain( explainer_args = dict() # TODO UNIFY THE INPUT FORMAT - # First things first. We transform any type of input in a suitable numpy array and we proceed with that on. - ferret_audio = FerretAudio(audio_path_or_array, native_sr=native_sr, model_helper=self.model_helper) + # 1. Run sanity checks + ferret_audio = FerretAudio(audio_path_or_array, current_sr=current_sr) + + # 2. 
We will need word level transcripts, let's force generate them if not provided + if word_timestamps is None: + print("Transcribing audio to get word level timestamps...") + text, word_timestamps = self.transcribe( + audio_path_or_array=audio_path_or_array, current_sr=current_sr + ) + print(f"Transcribed audio with whisperX into: {text}") ## Get the importance of each class (action, object, location) according to the perturb_paraling type if methodology == "perturb_paraling": @@ -160,7 +209,7 @@ def explain( explanation = explainer.compute_explanation( audio=ferret_audio, target_class=target_class, - words_trascript=words_trascript, + word_timestamps=word_timestamps, **explainer_args, ) explanations = explanation diff --git a/ferret/evaluators/faithfulness_measures_speech.py b/ferret/evaluators/faithfulness_measures_speech.py index e04043f..95e178c 100644 --- a/ferret/evaluators/faithfulness_measures_speech.py +++ b/ferret/evaluators/faithfulness_measures_speech.py @@ -10,7 +10,9 @@ ) from ..evaluators.faithfulness_measures import _compute_aopc from ..explainers.explanation_speech.explanation_speech import ( - ExplanationSpeech, EvaluationSpeech) + ExplanationSpeech, + EvaluationSpeech, +) from ..explainers.explanation_speech.utils_removal import remove_specified_words from ..speechxai_utils import pydub_to_np @@ -25,7 +27,7 @@ def compute_evaluation( self, explanation: ExplanationSpeech, target=None, - words_trascript: List = None, + # word_timestamps: List = None, **evaluation_args, ) -> EvaluationSpeech: """Evaluate an explanation on the AOPC Comprehensiveness metric. 
@@ -79,8 +81,8 @@ def compute_evaluation( # Split the audio into word-level audio segments from ..speechxai_utils import transcribe_audio - if words_trascript is None: - words_trascript = explanation.audio.transcription + # if word_timestamps is None: + word_timestamps = explanation.word_timestamps get_discrete_rationale_function = ( _check_and_define_get_id_discrete_rationale_function( @@ -140,12 +142,12 @@ def compute_evaluation( # For the comprehensiveness: we remove the terms in the discrete rationale. - words_removed = [words_trascript[i] for i in id_top] + words_removed = [word_timestamps[i] for i in id_top] audio_removed = remove_specified_words( explanation.audio.to_pydub(), words_removed, - removal_type=removal_type + removal_type=removal_type, ) audio_removed_np = pydub_to_np(audio_removed)[0] @@ -196,7 +198,7 @@ def compute_evaluation( self, explanation: ExplanationSpeech, target: List = None, - words_trascript: List = None, + # words_trascript: List = None, **evaluation_args, ) -> EvaluationSpeech: """Evaluate an explanation on the AOPC Sufficiency metric. @@ -246,13 +248,9 @@ def compute_evaluation( # Single probability ground_truth_probs_target = [ground_truth_probs[0][target[0]]] - # TODO: as above, probably a `FerretAudio` object should we passed as - # input. 
# Split the audio into word-level audio segments - from ..speechxai_utils import transcribe_audio - - if words_trascript is None: - words_trascript = explanation.audio.transcription + # if words_trascript is None: + words_trascript = explanation.word_timestamps get_discrete_rationale_function = ( _check_and_define_get_id_discrete_rationale_function( @@ -321,7 +319,7 @@ def compute_evaluation( audio_removed = remove_specified_words( explanation.audio.to_pydub(), words_removed, - removal_type=removal_type + removal_type=removal_type, ) audio_removed_np = pydub_to_np(audio_removed)[0] diff --git a/ferret/explainers/explanation_speech/explanation_speech.py b/ferret/explainers/explanation_speech/explanation_speech.py index cdcd905..5f74309 100644 --- a/ferret/explainers/explanation_speech/explanation_speech.py +++ b/ferret/explainers/explanation_speech/explanation_speech.py @@ -1,6 +1,6 @@ from dataclasses import dataclass import numpy as np -from typing import Optional +from typing import Optional, List, Dict from ...speechxai_utils import FerretAudio @@ -11,6 +11,7 @@ class ExplanationSpeech: explainer: str target: list audio: FerretAudio + word_timestamps: Optional[List[Dict]] = None @dataclass @@ -26,4 +27,4 @@ class EvaluationSpeech: name: str score: list - target: list \ No newline at end of file + target: list diff --git a/ferret/explainers/explanation_speech/gradient_speech_explainer.py b/ferret/explainers/explanation_speech/gradient_speech_explainer.py index f05b181..74e57a4 100644 --- a/ferret/explainers/explanation_speech/gradient_speech_explainer.py +++ b/ferret/explainers/explanation_speech/gradient_speech_explainer.py @@ -5,8 +5,10 @@ import torch from .explanation_speech import ExplanationSpeech from ...speechxai_utils import pydub_to_np, FerretAudio + # TODO - include in utils + class GradientSpeechExplainer: NAME = "Gradient" @@ -57,8 +59,8 @@ def _get_gradient_importance_frame_level( def compute_explanation( self, audio: FerretAudio, + word_timestamps: 
List, target_class=None, - words_trascript: List = None, no_before_span: bool = True, aggregation: str = "mean", ) -> ExplanationSpeech: @@ -99,9 +101,9 @@ def compute_explanation( else: targets = [int(np.argmax(logits_original, axis=1)[0])] - if words_trascript is None: - # Transcribe audio - words_trascript = audio.transcription + # if word_timestamps is None: + # # Transcribe audio + word_timestamps = audio.transcription # Compute gradient importance for each target label # This also handles the multilabel scenario as for FSC @@ -118,7 +120,7 @@ def compute_explanation( importances = [] a, b = 0, 0 # 50, 20 - for word in words_trascript: + for word in word_timestamps: if no_before_span: # We directly consider the transcribed word start_ms = (word["start"] * 1000 - a) / 1000 @@ -170,7 +172,7 @@ def compute_explanation( else: scores = np.array([importances]) - features = [word["word"] for word in words_trascript] + features = [word["word"] for word in word_timestamps] explanation = ExplanationSpeech( features=features, @@ -178,6 +180,7 @@ def compute_explanation( explainer=self.NAME + "-" + aggregation, target=targets if n_labels > 1 else targets, audio=audio, + word_timestamps=word_timestamps, ) - return explanation \ No newline at end of file + return explanation diff --git a/ferret/explainers/explanation_speech/lime_speech_explainer.py b/ferret/explainers/explanation_speech/lime_speech_explainer.py index afeec59..8dd7416 100644 --- a/ferret/explainers/explanation_speech/lime_speech_explainer.py +++ b/ferret/explainers/explanation_speech/lime_speech_explainer.py @@ -17,8 +17,8 @@ def __init__(self, model_helper): def compute_explanation( self, audio: FerretAudio, + word_timestamps: List, target_class=None, - words_trascript: List = None, removal_type: str = "silence", num_samples: int = 1000, ) -> ExplanationSpeech: @@ -56,9 +56,9 @@ def compute_explanation( else: targets = [int(np.argmax(logits_original, axis=1)[0])] - if words_trascript is None: - # Transcribe 
audio - words_trascript = audio.transcription + # if word_timestamps is None: + # Transcribe audio + # word_timestamps = audio.transcription audio_np = audio_array.reshape(1, -1) # Get the start and end indexes of the words. These will be used to split the audio and derive LIME interpretable features @@ -67,7 +67,7 @@ def compute_explanation( splits = [] old_start = 0 a, b = 0, 0 - for word in words_trascript: + for word in word_timestamps: start, end = int((word["start"] + a) * sampling_rate), int( (word["end"] + b) * sampling_rate ) @@ -108,9 +108,7 @@ def compute_explanation( map_scores = {k: v for k, v in exp.as_map()[target_class]} map_scores = { k: v - for k, v in sorted( - map_scores.items(), key=lambda x: x[0], reverse=False - ) + for k, v in sorted(map_scores.items(), key=lambda x: x[0], reverse=False) } # Remove the 'empty' spans, the spans between words @@ -139,6 +137,7 @@ def compute_explanation( explainer=self.NAME + "+" + removal_type, target=targets if n_labels > 1 else targets, audio=audio, + word_timestamps=word_timestamps, ) - return explanation \ No newline at end of file + return explanation diff --git a/ferret/explainers/explanation_speech/loo_speech_explainer.py b/ferret/explainers/explanation_speech/loo_speech_explainer.py index a10bef7..7d8e103 100644 --- a/ferret/explainers/explanation_speech/loo_speech_explainer.py +++ b/ferret/explainers/explanation_speech/loo_speech_explainer.py @@ -21,8 +21,8 @@ def __init__(self, model_helper): def remove_words( self, audio: FerretAudio, + word_timestamps: List, removal_type: str = "nothing", - words_trascript: List = None, display_audio: bool = False, ) -> Tuple[List[AudioSegment], List[Dict[str, Union[str, float]]]]: """ @@ -33,22 +33,16 @@ def remove_words( - pink noise """ - ## Transcribe audio - # TODO GA: transcribing audio might be an operation need by other explainers. 
I suggest we move it into FerretAudio or somewhere else such that can be done once and then shared (e.g., a method in the SpeechBenchmark class) - # transcription moved to the FerretAudio Class - if words_trascript is None: - words_trascript = audio.transcription - ## Load audio as pydub.AudioSegment pydub_segment = audio.to_pydub() ## Remove word audio_no_words = list() - for word in words_trascript: + for word in word_timestamps: audio_removed = remove_word(pydub_segment, word, removal_type) - # to use remove_word_np after implementing the numpy array version of pink noise and white noise + # to use remove_word_np after implementing the numpy array version of pink noise and white noise # audio_removed = remove_word_np(audio.array, audio.sample_rate, word, removal_type ) # audio_no_words.append(audio_removed) @@ -58,14 +52,14 @@ def remove_words( print(word["word"]) display(audio_removed) - return audio_no_words, words_trascript + return audio_no_words, word_timestamps def compute_explanation( self, audio: FerretAudio, target_class=None, removal_type: str = None, - words_trascript: List = None, + word_timestamps: List = None, ) -> ExplanationSpeech: """ Computes the importance of each word in the audio. 
@@ -73,7 +67,7 @@ def compute_explanation( ## Get modified audio by leaving a single word out and the words modified_audios, words = self.remove_words( - audio, removal_type, words_trascript=words_trascript + audio=audio, word_timestamps=word_timestamps, removal_type=removal_type ) logits_modified = self.model_helper.predict(modified_audios) @@ -128,6 +122,7 @@ def compute_explanation( explainer=self.NAME + "+" + removal_type, target=targets if n_labels > 1 else [targets], audio=audio, # TODO GA: I don't know if this is something we want to keep + word_timestamps=word_timestamps, ) return explanation diff --git a/ferret/speechxai_utils.py b/ferret/speechxai_utils.py index 9c07a31..d9a4175 100644 --- a/ferret/speechxai_utils.py +++ b/ferret/speechxai_utils.py @@ -9,42 +9,161 @@ from transformers import Wav2Vec2ForSequenceClassification, Wav2Vec2FeatureExtractor import librosa import whisperx -from typing import Dict, List, Union, Tuple +from typing import Dict, List, Union, Tuple, Optional + + +class FerretAudio: + """ + Internal class to handle audio data. We force signal to be mono and of type np.float32 (i.e., 4 bytes to represent each sample). 
+    """
+
+    def __init__(
+        self,
+        audio_path_or_array: Union[str, np.ndarray],
+        current_sr: Optional[int] = None,
+    ):
+        self.audio_path_or_array = audio_path_or_array
+        self.current_sr = current_sr
+        self._transcription = None
+
+        if isinstance(audio_path_or_array, str):
+            self.array, self.current_sr = librosa.load(
+                audio_path_or_array, sr=None, dtype=np.float32
+            )
+        elif isinstance(audio_path_or_array, np.ndarray):
+            if current_sr is None:
+                raise ValueError(
+                    "If audio is provided as a numpy array, the native sampling rate (current_sr arg) must be provided"
+                )
+            self.array = audio_path_or_array
+        else:
+            raise ValueError(
+                "audio_path_or_array must be a string (path to audio file) or a numpy array"
+            )
+
+        # check dimensions and channels
+        if self.array.ndim > 2 or (self.array.ndim == 2 and self.array.shape[1] != 1):
+            raise ValueError(
+                "Audio must be mono in either the format (n_samples,) or (n_samples, 1)"
+            )
+
+        # reshape to (n_samples, 1) if needed
+        # TODO: is this needed?
+        self.array = self.array.reshape(-1, 1)
+
+    @property
+    def _is_normalized(self) -> bool:
+        """Check if the array is already normalized."""
+        return np.max(np.abs(self.array)) <= 1.0
+
+    @property
+    def normalized_array(self) -> np.ndarray:
+        return self.array / 32768.0 if not self._is_normalized else self.array
+
+    # @property
+    # def transcription(self):
+    #     if self._transcription is None:
+    #         if (
+    #         ):
+    #             _, self._transcription = transcribe_audio(
+    #                 audio=self.normalized_array,  # is normalization needed when transcribing? I am assuming so
+    #                 device="cuda",
+    #                 batch_size=2,
+    #                 compute_type="float32",
+    #             )
+    #         else:
+    #             return self._transcription
+
+    def resample(self, target_sr: int):
+        """
+        Resample the audio to the target sampling rate. In place operation. 
+ """ + self.array = librosa.resample( + self.array, orig_sr=self.current_sr, target_sr=target_sr + ) + self.current_sr = target_sr + + @staticmethod + def unnormalize_array(arr, dtype=np.int16): + """ + Given a NumPy array normalized in `[-1, 1]`, returns an array rescaled + in `[-max, max]`, where `max` is the maximum (in absolute value) + (integer) number representable by the selected `dtype`. In practice, + we convert a normalized array of dtype `float32` into a normalized + one of dtype `int16`, as needed to create a PyDub `AudioSegment` + object. + """ + max_val = np.maximum(np.iinfo(dtype).max, np.abs(np.iinfo(dtype).min)) + + return (arr * max_val).astype(dtype) + + def to_pydub(self) -> pydub.AudioSegment: + """ + Converts audio to `pydub.AudioSegment`. + + Notes: + * In order to convert to PyDub `AudioSegment` type we need the + array to be + * of dtype int16, + * NOT normalized. + Therefore, if the array is normalized, we unnormalize it. + * In any case, PyDub only works with unnormalized arrays of dtype + int16, so that's what we need to pass as the input to + `AudioSegment`. + * Because we only manipulate mono audio, the array can either have + shape `(n_samples, 1)` or `(n_samples,)` (flat array). Either is + fine for PyDub (the extra dimension is taken care of + automatically for mono audio). 
+ """ + if self._is_normalized: + unnormalized_array = self.unnormalize_array(self.array) + else: + unnormalized_array = self.array + + return pydub.AudioSegment( + unnormalized_array.tobytes(), + frame_rate=self.current_sr, + sample_width=unnormalized_array.dtype.itemsize, + channels=1, + ) def transcribe_audio( audio: np.ndarray, - device: str = "cuda", - batch_size: int = 2, - compute_type: str = "float32", - language: str = "en", - model_name_whisper: str = "large-v2", + # native_sr: int, + device, + batch_size: int, + compute_type: str, + language: str, + model_name_whisper: str, ) -> Tuple[str, List[Dict[str, Union[str, float]]]]: """ - Transcribe audio using whisperx, - and return the text (transcription) and the words with their start and end times. + Transcribe audio using WhisperX, and return the text (transcription) and the words with their start and end times. """ - ## Load whisperx model + ## Load whisperx model. TODO: we should definitely avoid loading the model for *every* sample to subscribe + + device_type = device.type + device_index = device.index + model_whisperx = whisperx.load_model( model_name_whisper, - device, + device=device_type, + device_index=device_index, compute_type=compute_type, language=language, ) - ## Transcribe audio - # TODO: we are assuming that the array does not come already normalized - # audio_array = audio.normalized_array - # The normalization occurs in the FerretAudio Class + # required by whisperx + audio = audio.reshape( + -1, + ).astype(np.float32) - result = model_whisperx.transcribe( - audio, - batch_size=batch_size - ) + result = model_whisperx.transcribe(audio, batch_size=batch_size) model_a, metadata = whisperx.load_align_model( - language_code=result["language"], device=device + language_code=result["language"], device=device_type ) + model_a.to(device) ## Align timestamps result = whisperx.align( @@ -118,116 +237,6 @@ def transcribe_audio_given_model( return text, words -class FerretAudio: - """ - Internal 
class to handle audio data. We force signal to 1) mono, 2) a sampling rate of 16000, 3) np.float32 (i.e., 4 bytes to represent each sample). - We infer the native sampling rate using librosa. - """ - - def __init__( - self, - audio_path_or_array: Union[str, np.ndarray], - native_sr: int = None, - model_helper=None, - ): - self.target_sr = 16000 - self.native_sr = native_sr - self.audio_path_or_array = audio_path_or_array - self.model_helper = model_helper - self._transcription = None - - if isinstance(audio_path_or_array, str): - self.native_sr = librosa.get_samplerate(audio_path_or_array) - - # Note: by default, librosa returns an array normalized in [-1,1]. - self.array, self.sample_rate = librosa.load( - audio_path_or_array, sr=self.target_sr, dtype=np.float32 - ) - - elif isinstance(audio_path_or_array, np.ndarray): - if native_sr is None: - raise ValueError( - "If audio is provided as a numpy array, native_sr must be provided" - ) - self.array, self.sample_rate = librosa.resample( - audio_path_or_array, self.native_sr, self.target_sr - ) - - @property - def is_normalized(self) -> bool: - """Check if the array is already normalized.""" - return np.max(np.abs(self.array)) <= 1.0 - - @property - def normalized_array(self) -> np.ndarray: - if not self.is_normalized: - return self.array / 32768.0 - else: - return self.array - - @property - def transcription(self): - if self._transcription is None: - if self.model_helper and hasattr(self.model_helper, 'device') and hasattr(self.model_helper, 'language'): - _ , self._transcription = transcribe_audio( - audio=self.normalized_array, # is normalization needed when transcribing? 
i am assumimg so - device=self.model_helper.device.type, - batch_size=2, - compute_type="float32", - language=self.model_helper.language, - ) - else: - raise AttributeError("model_helper is not correctly configured") - return self._transcription - - @staticmethod - def unnormalize_array(arr, dtype=np.int16): - """ - Given a NumPy array normalized in `[-1, 1]`, returns an array rescaled - in `[-max, max]`, where `max` is the maximum (in absolute value) - (integer) number representable by the selected `dtype`. In practice, - we convert a normalized array of dtype `float32` into a normalized - one of dtype `int16`, as needed to create a PyDub `AudioSegment` - object. - """ - max_val = np.maximum( - np.iinfo(dtype).max, - np.abs(np.iinfo(dtype).min) - ) - - return (arr * max_val).astype(dtype) - - def to_pydub(self) -> pydub.AudioSegment: - """ - Converts audio to `pydub.AudioSegment`. - - Notes: - * In order to convert to PyDub `AudioSegment` type we need the - array to be - * of dtype int16, - * NOT normalized. - Therefore, if the array is normalized, we unnormalize it. - * In any case, PyDub only works with unnormalized arrays of dtype - int16, so that's what we need to pass as the input to - `AudioSegment`. - * Because we only manipulate mono audio, the array can either have - shape `(n_samples, 1)` or `(n_samples,)` (flat array). Either is - fine for PyDub (the extra dimension is taken care of - automatically for mono audio). 
- """ - if self.is_normalized: - unnormalized_array = self.unnormalize_array(self.array) - else: - unnormalized_array = self.array - - return pydub.AudioSegment( - unnormalized_array.tobytes(), - frame_rate=self.target_sr, - sample_width=unnormalized_array.dtype.itemsize, - channels=1, - ) - - def pydub_to_np(audio: pydub.AudioSegment) -> Tuple[np.ndarray, int]: """ Converts pydub audio segment into np.float32 of shape [duration_in_seconds*sample_rate, channels], From bd9b5faeebcb52569223be7577bcaa2e87c94340 Mon Sep 17 00:00:00 2001 From: Giuseppe Attanasio Date: Tue, 19 Mar 2024 20:45:33 +0000 Subject: [PATCH 08/21] - added some dependencies - updated the new notebook - adapted the paraling explainer - [WIP] code crashed if no ffmpeg is found on the machine --- examples/speech/new_notebook.ipynb | 66 ++-------- ferret/benchmark_speech.py | 21 ++-- .../paraling_speech_explainer.py | 116 ++++++++++-------- pyproject.toml | 3 + 4 files changed, 91 insertions(+), 115 deletions(-) diff --git a/examples/speech/new_notebook.ipynb b/examples/speech/new_notebook.ipynb index 3a4b0ae..eec1ad7 100644 --- a/examples/speech/new_notebook.ipynb +++ b/examples/speech/new_notebook.ipynb @@ -19,23 +19,9 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/giuseppe/miniconda3/envs/ferret_0.5.0/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n", - "/home/giuseppe/miniconda3/envs/ferret_0.5.0/lib/python3.9/site-packages/pydub/utils.py:170: RuntimeWarning: Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work\n", - " warn(\"Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work\", RuntimeWarning)\n", - "/home/giuseppe/miniconda3/envs/ferret_0.5.0/lib/python3.9/site-packages/pyannote/audio/core/io.py:43: UserWarning: torchaudio._backend.set_audio_backend has been deprecated. With dispatcher enabled, this function is no-op. You can remove the function call.\n", - " torchaudio.set_audio_backend(\"soundfile\")\n", - "torchvision is not available - cannot save figures\n" - ] - } - ], + "outputs": [], "source": [ "from datasets import Dataset, load_dataset\n", "from IPython.display import display\n", @@ -52,7 +38,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -61,7 +47,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -86,7 +72,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -98,7 +84,7 @@ "})" ] }, - "execution_count": 5, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -110,7 +96,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "metadata": {}, "outputs": [ { @@ -127,7 +113,7 @@ " 'instruction': 'Recognize the action behind the verbal expression. 
The answer could be activate, bring, change language, deactivate, decrease, or increase.'}" ] }, - "execution_count": 6, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } @@ -153,7 +139,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -197,7 +183,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 9, "metadata": {}, "outputs": [], "source": [ @@ -437,42 +423,14 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": null, "metadata": {}, "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Transcribing audio to get word level timestamps...\n" - ] - }, { "name": "stderr", "output_type": "stream", "text": [ - "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.1. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../.cache/torch/whisperx-vad-segmentation.bin`\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", - "Model was trained with torch 1.10.0+cu102, yours is 2.2.1+cu121. 
Bad things might happen unless you revert torch to 1.x.\n", - "Transcribed audio with whisperX into: Increase the temperature in the washroom.\n" - ] - }, - { - "ename": "TypeError", - "evalue": "compute_explanation() got an unexpected keyword argument 'audio'", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[32], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m explain_table \u001b[38;5;241m=\u001b[39m \u001b[43mbenchmark\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mexplain\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2\u001b[0m \u001b[43m \u001b[49m\u001b[43maudio_path_or_array\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msample\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43maudio\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43marray\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3\u001b[0m \u001b[43m \u001b[49m\u001b[43mcurrent_sr\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msample\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43maudio\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43msampling_rate\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4\u001b[0m \u001b[43m \u001b[49m\u001b[43mmethodology\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mperturb_paraling\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 5\u001b[0m \u001b[43m)\u001b[49m\n\u001b[1;32m 6\u001b[0m 
display(benchmark\u001b[38;5;241m.\u001b[39mshow_table(explain_table, decimals\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m2\u001b[39m))\n", - "File \u001b[0;32m~/ferret/ferret/benchmark_speech.py:179\u001b[0m, in \u001b[0;36mSpeechBenchmark.explain\u001b[0;34m(self, audio_path_or_array, current_sr, target_class, methodology, perturbation_types, removal_type, aggregation, num_samples, word_timestamps, verbose, verbose_target)\u001b[0m\n\u001b[1;32m 177\u001b[0m explainer \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mexplainers[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mperturb_paraling\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n\u001b[1;32m 178\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m perturbation_type \u001b[38;5;129;01min\u001b[39;00m perturbation_types:\n\u001b[0;32m--> 179\u001b[0m explanation \u001b[38;5;241m=\u001b[39m \u001b[43mexplainer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcompute_explanation\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 180\u001b[0m \u001b[43m \u001b[49m\u001b[43maudio\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mferret_audio\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 181\u001b[0m \u001b[43m \u001b[49m\u001b[43mtarget_class\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtarget_class\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 182\u001b[0m \u001b[43m \u001b[49m\u001b[43mperturbation_type\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mperturbation_type\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 183\u001b[0m \u001b[43m \u001b[49m\u001b[43mverbose\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverbose\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 184\u001b[0m \u001b[43m \u001b[49m\u001b[43mverbose_target\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverbose_target\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 185\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 186\u001b[0m explanations\u001b[38;5;241m.\u001b[39mappend(explanation)\n\u001b[1;32m 
188\u001b[0m \u001b[38;5;66;03m# table = self.create_table(importances)\u001b[39;00m\n\u001b[1;32m 189\u001b[0m \u001b[38;5;66;03m## Get the importance of each word\u001b[39;00m\n\u001b[1;32m 190\u001b[0m \u001b[38;5;66;03m# elif:\u001b[39;00m\n\u001b[1;32m 191\u001b[0m \n\u001b[1;32m 192\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n", - "\u001b[0;31mTypeError\u001b[0m: compute_explanation() got an unexpected keyword argument 'audio'" + "Perturbation type: 75%|█████████████████████████████████████████████████████████████████████ | 6/8 [01:13<00:11, 5.92s/it]" ] } ], diff --git a/ferret/benchmark_speech.py b/ferret/benchmark_speech.py index 3979a46..3364084 100644 --- a/ferret/benchmark_speech.py +++ b/ferret/benchmark_speech.py @@ -12,6 +12,7 @@ ParalinguisticSpeechExplainer, ) from .speechxai_utils import FerretAudio, transcribe_audio +from tqdm.autonotebook import tqdm SCORES_PALETTE = sns.diverging_palette(240, 10, as_cmap=True) @@ -163,19 +164,11 @@ def explain( # 1. Run sanity checks ferret_audio = FerretAudio(audio_path_or_array, current_sr=current_sr) - # 2. We will need word level transcripts, let's force generate them if not provided - if word_timestamps is None: - print("Transcribing audio to get word level timestamps...") - text, word_timestamps = self.transcribe( - audio_path_or_array=audio_path_or_array, current_sr=current_sr - ) - print(f"Transcribed audio with whisperX into: {text}") - ## Get the importance of each class (action, object, location) according to the perturb_paraling type if methodology == "perturb_paraling": explanations = [] explainer = self.explainers["perturb_paraling"] - for perturbation_type in perturbation_types: + for perturbation_type in tqdm(perturbation_types, desc="Perturbation type"): explanation = explainer.compute_explanation( audio=ferret_audio, target_class=target_class, @@ -190,12 +183,22 @@ def explain( # elif: else: + if methodology not in self.explainers: raise ValueError( f"Explainer {methodology} not supported. 
Choose between " '"LOO", "Gradient", "GradientXInput", "LIME", ' '"perturb_paraling"' ) + + # 2. We will need word level transcripts, let's force generate them if not provided + if word_timestamps is None: + print("Transcribing audio to get word level timestamps...") + text, word_timestamps = self.transcribe( + audio_path_or_array=audio_path_or_array, current_sr=current_sr + ) + print(f"Transcribed audio with whisperX into: {text}") + if "LOO" in methodology: explainer_args["removal_type"] = removal_type elif "LIME" in methodology: diff --git a/ferret/explainers/explanation_speech/paraling_speech_explainer.py b/ferret/explainers/explanation_speech/paraling_speech_explainer.py index 6c7bfc2..ac18d34 100644 --- a/ferret/explainers/explanation_speech/paraling_speech_explainer.py +++ b/ferret/explainers/explanation_speech/paraling_speech_explainer.py @@ -1,4 +1,5 @@ """Paralinguistic Speech Explainer module""" + import os import numpy as np import pandas as pd @@ -14,7 +15,16 @@ PolarityInversion, ) from .explanation_speech import ExplanationSpeech -from ...speechxai_utils import pydub_to_np +from ...speechxai_utils import pydub_to_np, FerretAudio +import torchaudio.functional as F +import torch +from audiostretchy.stretch import AudioStretch +import audio_effects +import tempfile +from io import BytesIO +import requests + +from copy import deepcopy # If True, We use the audiostretchy library to perform time stretching @@ -25,6 +35,12 @@ REFERENCE_STR = "-" +ENDPOINTS = { + "WHITE_NOISE": "https://github.com/g8a9/ferret/raw/feat/support-speech-from-array/ferret/explainers/explanation_speech/white_noise.mp3", + "PINK_NOISE": "https://github.com/g8a9/ferret/raw/feat/support-speech-from-array/ferret/explainers/explanation_speech/pink_noise.mp3" +} + + def _tmp_log1( verbose_target, original_gt, @@ -41,22 +57,22 @@ def _tmp_log1( print("m", modified_trg) -def _tmp_log2( - verbose_target, - original_gt, - modified_trg, - n_labels, -): - if n_labels > 1: - print_log( - [ - 
original_gt[verbose_target] - modified_trg[verbose_target][i] - for i in range(modified_trg[verbose_target].shape[0]) - ] - ) +# def _tmp_log2( +# verbose_target, +# original_gt, +# modified_trg, +# n_labels, +# ): +# if n_labels > 1: +# print_log( +# [ +# original_gt[verbose_target] - modified_trg[verbose_target][i] +# for i in range(modified_trg[verbose_target].shape[0]) +# ] +# ) - else: - print_log([original_gt - modified_trg[i] for i in range(modified_trg.shape[0])]) +# else: +# print_log([original_gt - modified_trg[i] for i in range(modified_trg.shape[0])]) class ParalinguisticSpeechExplainer: @@ -131,7 +147,6 @@ def augmentation( def time_stretching_augmentation( self, audio_as: AudioSegment, perturbation_value: float ): - import audio_effects if perturbation_value < 1: perturbed_audio_as = audio_effects.speed_down(audio_as, perturbation_value) @@ -141,14 +156,19 @@ def time_stretching_augmentation( return perturbed_audio.squeeze() def time_stretching_augmentation_AudioStretch( - self, audio_path: str, perturbation_value: float + self, audio: FerretAudio, perturbation_value: float ): - from audiostretchy.stretch import AudioStretch + pydub_segment = audio.to_pydub() audio_stretch = AudioStretch() - audio_stretch.open(audio_path) - audio_stretch.stretch(ratio=perturbation_value) - perturbated_audio_samples = np.array(audio_stretch.samples, dtype=np.float32) + with tempfile.NamedTemporaryFile(suffix=".wav") as temp_audio: + pydub_segment.export(temp_audio.name, format="wav") + temp_audio.seek(0) + + audio_stretch.open(temp_audio.name) + audio_stretch.stretch(ratio=perturbation_value) + perturbated_audio_samples = np.array(audio_stretch.samples, dtype=np.float32) + return perturbated_audio_samples def pitch_shifting_augmentation( @@ -172,13 +192,11 @@ def add_white_noise_torchaudio(self, original_speech, noise_rate): noise_rate: signal-to-noise ratios in dB """ - import torchaudio.functional as F - from copy import deepcopy - import torch - - WHITE_NOISE = 
os.path.join(os.path.dirname(__file__), "white_noise.mp3") + # WHITE_NOISE = os.path.join(os.path.dirname(__file__), "white_noise.mp3") + # noise_as = AudioSegment.from_mp3(WHITE_NOISE) - noise_as = AudioSegment.from_mp3(WHITE_NOISE) + res = requests.get(ENDPOINTS["WHITE_NOISE"]) + noise_as = AudioSegment.from_file(BytesIO(res.content), "mp3") noise, frame_rate = pydub_to_np(noise_as) # Reshape and convert to torch tensor @@ -205,9 +223,6 @@ def change_pitch_torchaudio(self, original_speech, frame_rate, perturbation_valu perturbation_value: """ - import torchaudio.functional as F - import torch - # Reshape and convert to torch tensor audio_t = torch.tensor(original_speech.reshape(1, -1)) perturbated_audio = F.pitch_shift( @@ -218,7 +233,7 @@ def change_pitch_torchaudio(self, original_speech, frame_rate, perturbation_valu def perturbe_waveform( self, - audio_path: str, + audio: FerretAudio, perturbation_type: str, return_perturbations=False, verbose: bool = False, @@ -233,8 +248,8 @@ def perturbe_waveform( """ ## Load audio as pydub.AudioSegment - audio_as = AudioSegment.from_wav(audio_path) - audio, frame_rate = pydub_to_np(audio_as) + # audio_as = AudioSegment.from_wav(audio_path) + # audio, frame_rate = pydub_to_np(audio_as) ## Perturbate audio perturbated_audios = [] @@ -306,9 +321,7 @@ def perturbe_waveform( raise ValueError(f"Perturbation '{perturbation_type}' is not available") if verbose: - from IPython.display import Audio - print_log("Original audio") # Display the original audio and show its info for a single class self._tmp_log_show_info( "Original audio", @@ -317,27 +330,31 @@ def perturbe_waveform( verbose_target, ) + pydub_segment = audio.to_pydub() + audio_array = audio.array for perturbation_value in perturbations: if "time stretching" in perturbation_type: if USE_AUDIOSTRETCH: perturbated_audio = self.time_stretching_augmentation_AudioStretch( - audio_path, perturbation_value + audio=audio, perturbation_value=perturbation_value ) else: 
perturbated_audio = self.time_stretching_augmentation( - audio_as, perturbation_value + pydub_segment, perturbation_value ) elif "pitch shifting" in perturbation_type: # perturbated_audio = self.pitch_shifting_augmentation( # audio_as, perturbation_value # ) perturbated_audio = self.change_pitch_torchaudio( - audio, frame_rate, perturbation_value + audio.array, + audio.current_sr, + perturbation_value, # TODO: Assuming frame rate == sampling rate ) elif perturbation_type == "noise" and USE_ADD_NOISE_TORCHAUDIO: perturbated_audio = self.add_white_noise_torchaudio( - audio, perturbation_value + audio.array, perturbation_value ) else: augment = self.augmentation( @@ -345,7 +362,7 @@ def perturbe_waveform( perturbation_type=perturbation_type, ) perturbated_audio = augment( - samples=audio.squeeze(), sample_rate=frame_rate + samples=audio.array.squeeze(), sample_rate=audio.current_sr ) if verbose: @@ -365,7 +382,7 @@ def perturbe_waveform( def compute_explanation( self, - audio_path: str, + audio: FerretAudio, target_class=None, perturbation_type: str = None, verbose: bool = False, @@ -376,7 +393,7 @@ def compute_explanation( """ modified_audios = self.perturbe_waveform( - audio_path, + audio, perturbation_type, verbose=verbose, verbose_target=verbose_target, @@ -386,9 +403,7 @@ def compute_explanation( logits_modified = self.model_helper.predict(modified_audios) - audio = pydub_to_np(AudioSegment.from_wav(audio_path))[0] - - logits_original = self.model_helper.predict([audio]) + logits_original = self.model_helper.predict([audio.array]) # Check if single label or multilabel scenario as for FSC n_labels = self.model_helper.n_labels @@ -411,9 +426,7 @@ def compute_explanation( if n_labels > 1: # Multilabel scenario as for FSC modified_trg = [logits_modified[i][:, targets[i]] for i in range(n_labels)] - original_gt = [ - logits_original[i][:, targets[i]][0] for i in range(n_labels) - ] + original_gt = [logits_original[i][:, targets[i]][0] for i in range(n_labels)] else: 
modified_trg = logits_modified[:, targets] @@ -421,8 +434,7 @@ def compute_explanation( if verbose: _tmp_log1(verbose_target, original_gt, modified_trg, n_labels) - - _tmp_log2(verbose_target, original_gt, modified_trg, n_labels) + # _tmp_log2(verbose_target, original_gt, modified_trg, n_labels) ## Compute the difference between the ground truth and the modified audio # prediction_diff = original_gt - np.mean(modified_trg) @@ -444,7 +456,7 @@ def compute_explanation( scores=scores, explainer=self.NAME, target=targets if n_labels > 1 else [targets], - audio_path=audio_path, + audio=audio, ) return explanation diff --git a/pyproject.toml b/pyproject.toml index d254af7..e4f3c54 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,6 +48,7 @@ pydub = { version = "0.25.1", optional = true } audiomentations = { version = "0.34.1", optional = true } audiostretchy = { version = "1.3.5", optional = true } pyroomacoustics = { version = "0.7.3", optional = true } +audio-effects = { version = "0.22", optional = true } # The version of WhisperX currently on PyPI has a problem with a dependency, # so the dependency needs to be installed from the GitHub repo, which in turns @@ -62,6 +63,7 @@ speech = [ "audiomentations", "audiostretchy", "pyroomacoustics", + "audio-effects" # "whisperx" ] all = [ @@ -69,6 +71,7 @@ all = [ "audiomentations", "audiostretchy", "pyroomacoustics", + "audio-effects" # "whisperx" ] From fa78095b0a4d244b873c89c48efc4bcb8839fa03 Mon Sep 17 00:00:00 2001 From: Giuseppe Attanasio Date: Tue, 19 Mar 2024 22:55:46 +0000 Subject: [PATCH 09/21] Everything works with no coding errors on the new notebooks. 
- final edits to methods to update - update the notebook name - WIP need to check that everything returns expected results - WIP need to check that the notebook with local loading works --- examples/speech/audio_from_hf.ipynb | 627 ++++++++++++++++++ examples/speech/new_notebook.ipynb | 504 -------------- ferret/benchmark_speech.py | 16 +- .../paraling_speech_explainer.py | 17 +- 4 files changed, 648 insertions(+), 516 deletions(-) create mode 100644 examples/speech/audio_from_hf.ipynb delete mode 100644 examples/speech/new_notebook.ipynb diff --git a/examples/speech/audio_from_hf.ipynb b/examples/speech/audio_from_hf.ipynb new file mode 100644 index 0000000..171a7dd --- /dev/null +++ b/examples/speech/audio_from_hf.ipynb @@ -0,0 +1,627 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Speech XAI" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/giuseppe/miniconda3/envs/ferret_0.5.0/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n", + "/home/giuseppe/miniconda3/envs/ferret_0.5.0/lib/python3.9/site-packages/pyannote/audio/core/io.py:43: UserWarning: torchaudio._backend.set_audio_backend has been deprecated. With dispatcher enabled, this function is no-op. 
You can remove the function call.\n", + " torchaudio.set_audio_backend(\"soundfile\")\n", + "torchvision is not available - cannot save figures\n" + ] + } + ], + "source": [ + "from datasets import Dataset, load_dataset\n", + "from IPython.display import display\n", + "import numpy as np \n", + "import os\n", + "import pandas as pd\n", + "from pathlib import Path\n", + "from pydub import AudioSegment\n", + "import torch\n", + "from transformers import Wav2Vec2ForSequenceClassification, Wav2Vec2FeatureExtractor\n", + "\n", + "from ferret import SpeechBenchmark, AOPC_Comprehensiveness_Evaluation_Speech, AOPC_Sufficiency_Evaluation_Speech" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "DATASET_ID = \"DynamicSuperb/IntentClassification_FluentSpeechCommands-Action\"" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "cuda:2\n" + ] + } + ], + "source": [ + "device = 'cuda:2' if torch.cuda.is_available() else 'cpu'\n", + "print(device)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Data" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Dataset({\n", + " features: ['file', 'speakerId', 'transcription', 'audio', 'label', 'instruction'],\n", + " num_rows: 200\n", + "})" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "data = load_dataset(DATASET_ID, split=\"test\")\n", + "data" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'file': 'wavs/speakers/Xygv5loxdZtrywr9/77506ae0-452b-11e9-a843-8db76f4b5e29.wav',\n", + " 'speakerId': 'Xygv5loxdZtrywr9',\n", + " 'transcription': 'Increase the temperature in the washroom',\n", + " 'audio': {'path': 
'77506ae0-452b-11e9-a843-8db76f4b5e29.wav',\n", + " 'array': array([0. , 0. , 0. , ..., 0.02133179, 0.01977539,\n", + " 0.01849365]),\n", + " 'sampling_rate': 16000},\n", + " 'label': 'increase',\n", + " 'instruction': 'Recognize the action behind the verbal expression. The answer could be activate, bring, change language, deactivate, decrease, or increase.'}" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "sample = data[0]\n", + "sample" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this notebook we are using Wav2Vec2 which expects audio arrays to be in 16kHz. Luckly, this is the native sampling rate of our data. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Models" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at superb/wav2vec2-base-superb-ic were not used when initializing Wav2Vec2ForSequenceClassification: ['wav2vec2.encoder.pos_conv_embed.conv.weight_g', 'wav2vec2.encoder.pos_conv_embed.conv.weight_v']\n", + "- This IS expected if you are initializing Wav2Vec2ForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing Wav2Vec2ForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of Wav2Vec2ForSequenceClassification were not initialized from the model checkpoint at superb/wav2vec2-base-superb-ic and are newly initialized: ['wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original0', 'wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original1']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" + ] + } + ], + "source": [ + "## Load model\n", + "model = Wav2Vec2ForSequenceClassification.from_pretrained(\n", + " \"superb/wav2vec2-base-superb-ic\"\n", + ")\n", + "feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(\n", + " \"superb/wav2vec2-base-superb-ic\"\n", + ")\n", + "\n", + "if torch.cuda.is_available():\n", + " model = model.to(device)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Speech-XAI: the `SpeechBenchmark` class" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note: if not specified otherwise, `SpeechBenchmark` assumes English as the source language." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "## Instantiate benchmark class\n", + "benchmark = SpeechBenchmark(model, feature_extractor, device=device)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's start from transcribing the example above using WhisperX." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.1. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../.cache/torch/whisperx-vad-segmentation.bin`\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", + "Model was trained with torch 1.10.0+cu102, yours is 2.2.1+cu121. Bad things might happen unless you revert torch to 1.x.\n" + ] + }, + { + "data": { + "text/plain": [ + "(' Increase the temperature in the washroom.',\n", + " [{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438},\n", + " {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141},\n", + " {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444},\n", + " {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848},\n", + " {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953},\n", + " {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "text, word_timestamps = benchmark.transcribe(\n", + " sample[\"audio\"][\"array\"],\n", + " current_sr=sample[\"audio\"][\"sampling_rate\"],\n", + ")\n", + "text, word_timestamps" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Explain word importance" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Word importance" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ExplanationSpeech(features=['Increase', 'the', 'temperature', 'in', 'the', 
'washroom.'], scores=array([[ 0.47325948, -0.45515063, -0.10200211, -0.15734437, -0.12148061,\n", + " 0.0109534 ],\n", + " [ 0.07733697, -0.02064097, 0.34651279, -0.01588559, -0.01463729,\n", + " -0.02365428],\n", + " [-0.01432282, -0.01848161, -0.00988954, -0.00070852, -0.01123005,\n", + " 0.32860303]]), explainer='loo_speech+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" + ] + } + ], + "source": [ + "explanation = benchmark.explain(\n", + " audio_path_or_array=sample[\"audio\"][\"array\"],\n", + " current_sr=sample[\"audio\"][\"sampling_rate\"],\n", + " methodology='LOO',\n", + " word_timestamps=word_timestamps\n", + ")\n", + "# display(benchmark.show_table(explanation, decimals=3))\n", + "print(explanation)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ExplanationSpeech(features=['Increase', 'the', 'temperature', 'in', 'the', 'washroom.'], scores=array([[ 2.73476301e-01, -2.75996308e-02, 2.68968859e-02,\n", + " 4.38230033e-02, -9.83693653e-03, 3.43606501e-02],\n", + " [-4.55664511e-02, 2.00727565e-04, 3.07805104e-01,\n", + " -7.30904579e-03, 8.18154319e-03, 1.45066594e-01],\n", + " [ 7.67946057e-02, -1.63121582e-02, 1.69544374e-01,\n", + " 1.03233484e-02, 6.95427995e-02, 4.02942428e-01]]), explainer='LIME+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444}, 
{'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" + ] + } + ], + "source": [ + "explanation = benchmark.explain(\n", + " audio_path_or_array=sample[\"audio\"][\"array\"],\n", + " current_sr=sample[\"audio\"][\"sampling_rate\"], \n", + " methodology='LIME',\n", + " word_timestamps=word_timestamps\n", + ")\n", + "print(explanation)\n", + "#display(benchmark.show_table(explanation, decimals=3))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can run the same function but with no word timestamps. The class will generate them automatically." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Transcribing audio to get word level timestamps...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.1. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../.cache/torch/whisperx-vad-segmentation.bin`\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", + "Model was trained with torch 1.10.0+cu102, yours is 2.2.1+cu121. 
Bad things might happen unless you revert torch to 1.x.\n", + "Transcribed audio with whisperX into: Increase the temperature in the washroom.\n", + "ExplanationSpeech(features=['Increase', 'the', 'temperature', 'in', 'the', 'washroom.'], scores=array([[ 0.30518979, -0.05905298, 0.02406042, 0.06312685, -0.01027066,\n", + " 0.00634839],\n", + " [-0.00192933, 0.04791304, 0.30365684, 0.01351917, -0.02577572,\n", + " 0.13388124],\n", + " [ 0.07868745, -0.02967894, 0.21510287, 0.02970933, 0.03952176,\n", + " 0.44306288]]), explainer='LIME+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" + ] + } + ], + "source": [ + "explanation = benchmark.explain(\n", + " audio_path_or_array=sample[\"audio\"][\"array\"],\n", + " current_sr=sample[\"audio\"][\"sampling_rate\"], \n", + " methodology='LIME',\n", + ")\n", + "print(explanation)\n", + "#display(benchmark.show_table(explanation, decimals=3))" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(EvaluationSpeech(name='aopc_compr_speech', score=[0.3684091712348163, 0.24896600097417831, 0.5148161690682173], target=[3, 4, 3]),\n", + " EvaluationSpeech(name='aopc_suff', score=[0.01417614333331585, -0.004319131374359131, -0.01769007444381714], target=[3, 4, 3]))" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aopc_compr = AOPC_Comprehensiveness_Evaluation_Speech(benchmark.model_helper)\n", + "evaluation_output_c = aopc_compr.compute_evaluation(explanation)\n", + "\n", + "aopc_suff = 
AOPC_Sufficiency_Evaluation_Speech(benchmark.model_helper)\n", + "evaluation_output_s = aopc_suff.compute_evaluation(explanation)\n", + "\n", + "evaluation_output_c, evaluation_output_s" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Explain paralinguistic impact" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Perturbation type: 100%|████████████████████████████████████████████████████████████████████████████████████████████| 8/8 [01:29<00:00, 11.25s/it]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
 pitch shiftingpitch shifting downpitch shifting uptime stretchingtime stretching downtime stretching upreverberationnoise
action=increase0.310.240.410.330.330.330.190.27
object=heat0.250.190.33-0.02-0.02-0.02-0.020.23
location=washroom0.020.020.02-0.02-0.02-0.020.000.70
\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "explain_table = benchmark.explain(\n", + " audio_path_or_array=sample[\"audio\"][\"array\"],\n", + " current_sr=sample[\"audio\"][\"sampling_rate\"],\n", + " methodology='perturb_paraling',\n", + ")\n", + "display(benchmark.show_table(explain_table, decimals=2))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Show variation" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "perturbation_types = ['time stretching', 'pitch shifting', 'reverberation', 'noise']\n", + "variations_table = benchmark.explain_variations(\n", + " audio_path_or_array=sample[\"audio\"][\"array\"],\n", + " current_sr=sample[\"audio\"][\"sampling_rate\"],\n", + " perturbation_types=perturbation_types\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAcYAAAGZCAYAAAATupELAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/H5lhTAAAACXBIWXMAAA9hAAAPYQGoP6dpAACdEklEQVR4nOzdd1xV9f/A8dcF4QKXLYqguAUXpuYegCIouFJzm6s0R2auFEdqOVPTsvxm3wwzR4ozV5IJoomrr5aZZq4C92A4AAXO7w9+nLiCcC+xfT99nMfDc/h8PudzzgXefM75DI2iKApCCCGEAMCksCsghBBCFCUSGIUQQogMJDAKIYQQGUhgFEIIITKQwCiEEEJkIIFRCCGEyEACoxBCCJGBBEYhhBAiAwmMQgghRAYSGEWxsn37dlasWJHp+ODBg6lbt24h1MhwsbGxzJo1i99//z1Py33ePcnJ1atX0Wg0bN68Odt0s2bNwtraOrfVE6LYkcAoipXnBYEZM2awfv36QqiR4WJjY5k9e3aRCYyGeuONNwgLC8u38oUoakoVdgWEyAvVqlUr7CrkqYSEBCwtLQu7GgBUqFCBChUqFHY1hCgw0mIUxcbgwYP5+uuvOXv2LBqNBo1Gw+DBg9WvZXyUunr1ajQaDSdPnsTf3x8rKys8PDzYv38/qampTJ8+HWdnZ5ydnQkKCiI1NVXvXOfOnaNr167Y2dmh0+no2LEjly5dyrGOCxYsoHr16lhYWFCmTBnatWvHlStXuHr1KlWqVAGgZ8+eav2vXr2qPtJcvXo1w4YNo3Tp0jRp0gSApKQkpk6dSqVKldBqtdSqVUuvZZzdPQGIjIzE398fW1tbbGxsaNq0KT/88INenRMTE3nrrbdwcHDAxcWFiRMnkpycrH792Uep4eHhaDQafvjhB/r164eNjQ2VKlXiww8/zHQ/Vq5cSaVKlbCyssLPz49Tp06p1ypEUSUtRlFszJgxgzt37nD+/HnWrVsHQJkyZbLNM3DgQEaMGMGECRNYsGAB3bt3Z9CgQcTHx7NmzRqOHTvGzJkz8fT0pF+/fgBcvnyZFi1aULduXVavXo2JiQlz587F19eXP/74A61Wm+W51qxZw4wZM3j//fdp3rw5cXFxHDp0iPj4eGrWrMnWrVvp3r078+bNo02bNgC4uLhw48YNAIKCgujYsSMbNmxQA3WvXr04fPgwM2fOpFatWuzZs4cBAwbg4OBAQEBAtvfkp59+om3btjRr1owvv/wSe3t7Tp48yd9//61X72nTptG1a1c2bdrEkSNHmDVrFtWrV2fEiBHZ3tsRI0bw2muvsW3bNrZv387kyZOpV68eHTp0AOC7775jxIgRvPHGG7z66qucPn2aXr16ZVumEEWCIkQxMmjQIKVOnTo5Hg8ODlYAZcWKFeqxM2fOKIDSrFkzvbwvv/yy8sorr6j7AwcOVKpWraokJCSox27fvq1YW1srn3322XPrNnr0aKVhw4bP/fqVK1cUQAkJCcnyeIcOHfSOHzhwQAGUffv26R3v3bu30rhx4+dee7oWLVootWvXVpKTk7OtT8+ePfWOe3t7K76+vur+zJkzFZ1Op+6HhYUpgDJp0iT1WGpqqlK5cmXl9ddfV481btxYadu2rV7ZH3zwgQIowcHBWdZJiKJAHqWKEs3Pz0/9v7u7OwC+vr56adzd3YmKilL3Q0ND6dKlC6VKlSI5OZnk5GQcHBxo0KABJ06ceO65GjZsyKlTpxg/fjyHDx/m6dOnRtW1Y8eOevuhoaE4OjrStm1btR7JycnqI8mUlJTnlvX48WOOHj3KoEGDMDU1zfa8/v7+evu1a9cmOjo6x/pmzKfRaKhVq5aaLyUlhVOnTtGlSxe9PF27ds2xXCEKmwRGUaLZ29ur/zc3N890LP14YmKiun/37l2WLVuGmZmZ3nbo0CG9APqswYMHs3TpUvbt20fr1q0pU6YMY8eOJSEhwaC6Ojs76+3fvXuX+/fvZ6r
HG2+8QXJysvoINisxMTGkpqbi6uqa43lzuh+5yXfnzh2Sk5MzPeouW7ZsjuUKUdjkHaMQz3B0dKRjx46MGjUq09dsbGyem8/ExISxY8cyduxYrl27xrfffsuUKVNwcnJixowZOZ5Xo9FkqkeZMmXYs2dPlumzCzL29vaYmJhw/fr1HM+bH8qUKUOpUqW4c+eO3vHbt28XSn2EMIYERlGsGNqa+TfatWvHb7/9RoMGDXJ8DPk85cuXZ8KECaxfv55z584B/7RYDa1/u3bt+PDDDzE3N6devXrPTZfVPdHpdDRv3pw1a9YwYcKEXF9HbpmamtKgQQN27NjB2LFj1ePbt28v0HoIkRsSGEWxUqtWLb766is2bNhAjRo1cHJyonLlynl6jtmzZ9O4cWPat2/P8OHDcXZ25ubNmxw8eJDWrVvTt2/fLPO9+eabODg40KxZMxwcHPjpp5/45Zdf1JZnuXLlsLe3Z8OGDVSpUgWtVpttwPPz86Nz58506NCBd999l3r16vHo0SPOnj3LxYsX+fLLL7O9JwsWLKBt27a0a9eOUaNG4eDgwP/+9z+cnJwYOnRont6zrEyfPp2uXbsybNgwevbsyalTp/j666+BtNa1EEWVfHeKYuX111+nZ8+ejBkzhsaNGzNr1qw8P0f16tU5fvw4pUuXZtSoUbRv354pU6bw6NGjbANZixYtOHz4MK+//jodOnRg3bp1LF26lNdffx1ICwbBwcFcuXIFX19fGjdunOOjzs2bNzNixAhWrFhBQEAAr7/+OqGhoXh7e6tpnndPWrVqpY45HDx4MN27d2fbtm1UqlTp398kA3Tp0oX//Oc/7Nu3j65du7J3717+85//AGBnZ1cgdRAiNzSKoiiFXQkhxIth1apVvPHGG1y5ciXPW/pC5BV5lCqEyBf3799n9uzZtG3bFhsbG06cOMHcuXPp2rWrBEVRpElgFELkCzMzMy5dusT69euJjY2lTJkyvPbaayxcuLCwqyZEtuRRqhBCCJGBdL4RQgghMpDAKIQQQmQg7xiLiNTUVK5fv46NjU2mGVCEEC8WRVF48OABrq6uMuazEEhgLCKuX7+Om5tbYVdDCFGEREVFySLRhUACYxGRPgfnxT0nsdFZ55BaX+KNB0afT1fV0eg8AA9+e/7E1dmxqGCfq3ymlmZG50m88TB357LK3Y+Dtqxxn1e6lIo6o/M8+Sl3c58m38/dPTGxynrtyZxY+Bo/iUBury010bhVTNKZWBj/vQVg5Zn9GqBZSfozxqj0Dx4/xKNni2zn5hX5p8AC4+DBgzl58iS//fZbQZ2yWEl/fGqjs8bW2rgfBnMr4zsW64w8h8rK+CAMYKnL3flMrYz/5WVulatTYarL3S9KrXUuA6Ot8fme6HJ3/5/mcnpZ09wGRltbo/Pk9tpSTZ7kKp+JpXmu8lnZGH9tSbrkXJ1LXqsUjgILjDNmzODRo0cFdTohhBAiVwosMFarVq1AzpOQkIClpWWBnEsIIUTJU2DdnQYPHkzdunUBWL16NRqNhlOnThEQEIBOp6NGjRqsWbMmU77du3fTsmVLrKyscHBwwMfHh1OnTgGoEyTv3r2bV199FVtbW3r27AlAbGwso0aNwsXFBa1Wy8svv0xoaGimsv38/Chbtiy2trY0bdqU77//Xi9NbGwsw4YNo3z58lhYWODm5kafPn300kRHRzNgwACcnJywtLTEy8uLn3/+Oc/unRBCiIJTqP2A+/fvj7+/P9u3b6dBgwYMHjxYXbsOYOPGjXTu3JmyZcuyfv161q1bR8uWLbl27ZpeOcOHD6datWps27aNiRMn8uTJE/z8/Ni1axdz587lu+++o3bt2nTs2JEzZ86o+a5cuULnzp355ptv2LJlCy1btiQwMJDw8HA1zfjx49m1axfz5s1j3759LFq0CK32n/cuMTExtGrVitOnT7N8+XK2bNmCTqejbdu22S7KmpSURHx8vN4mhBCi8BVqr9S33npLXau
uRYsW7N69my1btjB9+nQURWHixIn4+/uzbds2NU9gYGCmcrp06aI3/2JwcDCnT5/ml19+oXbt2gC0b9+eP//8kw8++IBNmzap50+XmppKmzZtOHv2LF988QU+Pj4AHD9+nH79+jFo0CA1bcYW47Jly4iNjeX48ePqiuq+vr64u7uzePFiPvzwwyyvff78+cyePduo+yWEECL/FWqL0d/fX/2/TqejUqVKREdHA/DHH38QHR1t0IKqHTt21NsPDQ3F09MTd3d3kpOT1c3Pz48TJ06o6aKjoxk0aBDly5enVKlSmJmZERoayoULF9Q0DRs2ZPXq1SxevDjLHrWhoaG0adMGR0dH9TympqZ4e3vrnetZQUFBxMXFqVtUVFSO1ymEECL/FWqL0d7eXm/f3NycxMS0fuX37t0DwNXVNcdynJ2d9fbv3r3LqVOnMDPL3P3e1NQUSGshdunShbi4ON5//32qV6+OTqfjvffe4++//1bTL1++HEdHR5YsWcKkSZNwc3MjKCiIkSNHquc6evRolufKrsORVqvVeyQrhBCiaCiyA/xLly4NkOMK55B5rI+joyP16tVj1apVz81z8eJFTp06xfbt2+natat6PCEhQS+dnZ0dy5YtY9myZZw5c4aPP/6YUaNGUbduXVq3bo2joyMdOnTggw8+yHQOCXxCCFH8FNnA6OHhQYUKFQgODqZXr15G5W3Xrh179uzB1dX1uS3O9ABobv7PIN+//vqLn376CXd39yzzeHp6snTpUlatWsW5c+do3bo17dq1Y+3atdSqVQudzviZTIQQQhQtRTYwajQaFi9eTN++fenRowcDBw5Eq9USGRlJ48aN6dSp03PzDhw4kJUrV+Lj48PEiRNxd3cnNjaWU6dO8eTJE+bPn0/NmjWpUKECU6ZMISUlhYcPHzJz5kzKly+vV1bLli3p1q0bdevWxdTUlDVr1mBubk7r1q2BtF6r69atw9vbm7Fjx1KxYkXu3LnDsWPHcHV1Zdy4cfl6n4QQQuStIhsYAXr37o2VlRVz586lT58+WFhY0LBhQ7p165ZtPq1Wy4EDB5g1axZz587lxo0bODk50aBBA7UXrFarZevWrYwePZqePXvi5ubG9OnTOXDgACdPnlTLatmyJWvWrOHKlSuYmJjg6enJzp07qVWrFpD2yPfo0aNMnz6dyZMnc+/ePcqWLUuzZs1yrKcQQoiiR6MoivETbYo8Fx8fj52dHddvRmFr5DyTmlx0LlZINTpPcWGCaa7ypZKSxzXJninGz82aQu4mzM7N9wiU7O+TgmTs/Y+Pj8elXHni4uKM/n0g/j1Z6EsIIYTIoMQHxoxT0eW32NhYZs2axe+//14g5xNCCJH3SnxgLEixsbHMnj1bAqMQQhRjEhiFEEKIDF6YwBgeHk6DBg3Q6XQ0adJEb/ULRVFYvHgx7u7uaLVaqlatytKlS/Xynz9/nj59+uDm5oaVlRW1a9dmyZIlpKamdU64evUqVapUAaBnz55oNBo0Gg1Xr14tsGsUQgjx7xXp4Rp55ebNm7z99ttMmTIFOzs7goKC6NatG5cuXcLMzIyxY8fy5ZdfMm3aNJo2bcqRI0eYPHkylpaWjBgxAoBr167h4eFB//79sbGx4fTp08ycOVMd/+ji4sLWrVvp3r078+bNo02bNgC4uLhkWaekpCSSkpLUfVldQwghioYXIjDev3+fgwcPUqdOHSBtwvI2bdpw7NgxXFxc+PTTT/n8888ZPnw4kDZzzuPHj5k9ezbDhw/HxMQEX19ffH19gbQWZqtWrXj8+DGffvopM2fORKvV0qBBAwBq1KhBs2bNsq2TrK4hhBBF0wvxKNXV1VUNioC6FFV0dDT79+8HoEePHnorcbRr146bN2+qq14kJiYyc+ZMqlevjlarxczMjGnTpnHjxg0ePnxodJ1kdQ0hhCiaXogWY1areEBasLt79y6KouDk5JRl3qioKCpVqsTkyZP573//y8yZM3n55Ze
xt7dnx44dzJkzh8TERKytrY2qk6yuIYQQRdMLERiz4+joiEaj4fDhw3oTiqfz8PAAICQkhDfffJPJkyerX9u9e3eB1VMIIUTBeOEDY/p7w3v37tG5c+fnpktISNALnCkpKXz77bd6aTK2RIUQQhRPL3xgdHd3Z/To0bz22mtMmjSJpk2b8vTpUy5cuEBYWBjbt28HwM/Pj//+97/Url0bJycnVqxYoderFKBcuXLY29uzYcMGqlSpglarpV69elm2RIUQQhRNL0Tnm5x88sknzJkzh2+//ZaOHTsyYMAANm7ciLe3t5pm+fLleHt7M2bMGF5//XU8PT2ZOnWqXjkmJiYEBwdz5coVfH19ady4sUELLQshhCg6ZHWNIiJ9dY0bN6/lYnUNjdHnU5CPvbDlZhWQgl4BRBQOWV2jcEmLUQghhMigWARGHx8fOnXqVNjVEEII8QIoFoFRCCGEKCglOjAmJCQUdhWEEEIUM0YFxkOHDqHRaLh06ZJ6rHPnzmg0Gs6ePase69u3Lx07dgRgypQpeHp6Ym1tTfny5enbty83btzQK/enn37Cy8sLOzs7bGxs8PT05Ouvv850/s2bN+Ph4YG1tTVt27bVq8fVq1fRaDSsXr2aYcOGUbp0aZo0aQKkzZU6dOhQnJycsLS0pEWLFkRERGQqf+XKlXh4eKDVaqlcuTJz5sxRV88AWL16NRqNhpMnT+Lv74+VlRUeHh7s37+f1NRUpk+fjrOzM87OzgQFBenlFUIIUTwYFRibNGmChYWFGlRSU1M5fPiw3jGAiIgIvLy8ALh9+zZTp05l9+7dfPzxx1y9ehVvb2+Sk5OBtN5XHTt2xNbWlg0bNrB9+3aGDx9ObGys3rlPnz7NokWLWLBgAatXr+bixYsMGDAgUx2DgoJQFIUNGzawaNEiUlJSCAgIYOfOnSxcuJCQkBCsra3x8/PTW3pq+fLljBgxgvbt27Nz504GDx7MrFmzePfddzOdY+DAgXTq1Ilt27bh6upK9+7dGTt2LFFRUaxZs4bRo0ezYMGCTBMAZJSUlER8fLzeJoQQovAZPVzD29ubqlWrEhwczOnTp2nSpAlDhw4lNjaWb7/9losXL1KjRg2OHDlC8+bN9fKmpKRw8+ZNKlSowL59+/D39+fkyZM0btyYX3/9FU9PzyzP6ePjw8mTJ7ly5QplypQB0lpvQ4YMISoqigoVKqjrIXbo0IG9e/eqeb/77ju6du3K999/T/v27QF4+vQp1atXp1GjRmzZsoWUlBTKlStHu3bt2LBhg5p36tSpLFmyhOvXr1O6dGn1nCtWrGDkyJEA/Pbbb3h6etKsWTMiIyPVvI0aNcLNzY1t27ZleU2zZs3KcnUNGa7x4pDhGuJ5ZLhG4TL6HaOXl5faOoyIiKBRo0YEBAToHbOysqJRo0YA7N27lxYtWmBnZ0epUqWoUKECABcuXACgWrVq2NraMnLkSDZt2sSdO3eyPG/9+vXVoAj6K2RklP4IN92hQ4ewtbVVgyKAmZkZ3bt35/Dhw0DaIsR3796lZ8+eenl79+7NkydPOH78uN5xPz8/9f/u7u7AP1PLZTye3YoZsrqGEEIUTUYHRm9vby5fvsy1a9fUR6atW7fm5s2b/Pnnn0RERNCsWTPMzMw4ceIEXbp0wdXVlW+++YbIyEiOHj0K/DOfqIODAz/88AM2Nja89tprlCtXDh8fH86cOaN33uxWyMjI2dlZbz8mJoayZctmug5nZ2fu37+vpskqb/p+erqs6pJej6zql92cqVqtFltbW71NCCFE4TM6MDZv3hwzMzMiIiI4dOgQXl5eODo6UqdOHQ4ePEhERAStW7cGYNu2bdjZ2bFp0ya6dOlCs2bNKFeuXKYymzRpwt69e4mNjWXnzp3cvn2bV155JVcXpNHoP1Z0dHTk9u3bmdLdunULR0dHNQ2QKd2tW7f0vi6EEKLkMzow6nQ6GjZsyMqVK7l37x6tWrUC0lqS69at48qVK2r
Hm4SEBMzMzPSC1bp1655btqWlJYGBgYwcOZIrV67kySoVrVq1Ij4+ntDQUPVYcnIy27ZtU+vu4eFBmTJlCAkJ0cu7adMmzM3N1d6tQgghSr5cra7h5eXFokWLaNiwofoI0MvLi88++wwzMzO1042fnx/Lli1jzJgxdOvWjcjISL755hu9snbv3s2qVavo1q0bFStW5ObNmyxfvpyWLVtiYWHxLy8v7Z1jkyZNGDBgAAsWLMDZ2Znly5dz48YNdRJwU1NTZsyYwdtvv03ZsmUJDAzk6NGjLFy4kHfeeYfSpUv/63oIIYQoHnIVGL29vVm0aJHaMgTU/zdq1AhLS0sAAgMDWbhwIcuXLyc4OJiWLVuya9cutcMKQPXq1TExMWHatGncvn2b0qVL4+/vz/z58//NdalMTU3Zs2cPEydOZNKkSTx69IiGDRsSGhrKyy+/rKYbM2YMZmZmfPTRR6xYsQIXFxdmzZqVaQWN/JLeOfjBgwdG55VeqcWT9EoVz5P+e0DWeCgcsrpGEREdHY2bm1thV0MIUYSkD0cTBUsCYxGRmprK9evXsbGxydSBKD4+Hjc3N6KiogzuvZqbPAWdrzjUMbf5pI5Sx3+TT1EUHjx4gKurKyYmJXrmziIpV49SRd4zMTHJ8S/D3AzryO1QkILMVxzqmNt8Use8yfci1tHOzs7oskTekD9FhBBCiAwkMAohhBAZSGAsBrRaLTNnzkSr1eZrnoLOVxzqmNt8UkepY37lE/lPOt8IIYQQGUiLUQghhMhAAqMQQgiRgQRGIYQQIgMJjEIIIUQGEhgL0Pnz5/Hz80On01GuXDneffddnjx5kmO+ypUro9FoMm0ZVx8JDw/PMk2fPn0Kpd7Pq49Go6FmzZoFXh+Ae/fuMWLECCpWrIhOp6Nu3bp8/vnnBtU7L+5jUXPx4kVGjBhB/fr1KVWqFHXr1jUo34oVK+jUqRNlypRBo9GwefPmTGny8z7mtt4DBgygRo0a6HQ6HBwc8PLy0lt1p6Drk9GyZcvQaDR06tRJ7/iL9P1YlMjMNwUkJiaGtm3bUqNGDbZu3cq1a9cYP348jx8/5tNPP80x/6uvvsqECRP0jmXVzTs4OFgv8Dg5ORVKvRs2bEhkZKTesfj4eAICAggICCjw+gD07NmT8+fPM2/ePCpWrMiePXsYOXIkpqamDBs2TC9tXt/Houjs2bPs3r2bpk2bkpqaSmpqqkH51qxZA6QtEpD+/+fJj/uY23o/efKE8ePHU6NGDRITE1m1ahWBgYGEhYWpa8gWZH3S3bx5k9mzZ2e5oHq6F+H7sUhRRIGYN2+eotPplHv37qnHVq5cqZiamirXrl3LNm+lSpWU0aNHZ5smLCxMAZQTJ07kSX3T/Zt6Pys4OFgBlOPHjxd4fW7cuKEASnBwsN5xLy8vpW3btup+ft3HoiglJUX9/6BBg5Q6deoYle/KlSsKoISEhGRKk5/3Mbf1flZycrLi5uamDBs2rFDr89prrykDBw5UvL29lY4dO+p97UX6fixK5FFqAdm7dy/t2rXD0dFRPdarVy9SU1Pz5HFOfsnLeq9fv54aNWrQuHHjAq/P06dPgczzT9rZ2RXq0j579+5VH49NmzZNPd6tWzc0Gg06nY4LFy7ky7lzOzl1YU9qnVfnNzU1xd7e3qDH8PlVn8OHD7N9+3YWLFjwr+og8pYExgJy/vz5TO/W7O3tcXFx4fz58znmX7duHVqtFmtrawIDAzlz5kyW6QIDAzE1NaVChQpMmjSJhISEQq13ulu3bnHgwAH69etXKPVxc3PD39+fefPm8fvvv/PgwQM2bdpEaGgoo0ePzpQ+r+/j8wQEBDB8+HAAFi1axJkzZ9i0aRPbt28HYOHChXrrlxY3BXUfDaUoCsnJydy7d4/Fixfz559/8uabbxZKXVJSUnjrrbeYNm0aLi4u2aYtavexpJN3jAUkJiYGe3v7TMcdHBy4f/9+tnm7dOlC06ZNqVixIpcvX2b
u3Lm0atWKU6dOUbVqVSCt5fPuu+/i5eWFpaUlBw4cYPHixZw7d45du3YVSr0z2rhxIykpKf86MP6b+mzdupXevXtTp04dIK3FsHz5cnr06KGmya/7mJ0lS5awf/9+Ll++zJAhQ4iKigKgXbt2WQbt4qAw7qMhVq1apb5Ptra2ZuPGjTRv3rxQ6rJixQoePXrEuHHjnpumqN7Hkk4CYzHwySefqP9v3bo1/v7+1KxZk8WLF7NixQoAGjRoQIMGDdR0bdu2xcXFhbfeeovjx4/TpEmTAq93RuvWrePll18utNaPoigMGTKEP//8k/Xr1+Pi4sIPP/zAO++8g4ODg9rLrzDuo7W1NWvWrMHLy4uff/4ZSPuFGBwcnGltzuKiqH4/vvLKK9SvX5+7d+8SEhJCr1692LZt27/qEJYbt2/f5r333mPNmjWYm5s/N11RvY8lnTxKLSAODg7ExcVlOh4TE6P3vswQLi4utGrVSv0l+jy9evUCyDFddvKi3pcuXeL48eP0798/1/X4t/XZvXs3ISEhbN68mb59++Lj48PcuXMZOHBgpt6+z8qL+5iTli1b0qhRI3W/S5cuJW7l9oK4jzlxcnKiUaNGdOjQgVWrVhEQEMCkSZMKvB7vvfce9erVo3Xr1sTGxhIbG0tycjLJycnq/5+nKNzHkk4CYwGpWbNmpndgcXFx3Lhx41+P68tPeVHv9evXY2Jikidjr3Jbn99//x1TU9NMY8waNGjA9evXefz48b+u27/x9ddfc/z4cXV/3bp1HDlypBBr9GJ4+eWXuXjxYoGf9/z580RERODg4KBuP/30E/v27cPBwYH9+/cXeJ3EPyQwFpCAgAD2799PbGyseiwkJAQTExP8/f2NKuv69escPnw4x96d3377LcC/6gWaF/XesGEDPj4+OXYwyM/6VKpUiZSUFH799Ve94z///DNly5bFysrquXnz4j5mJyoqirFjxwJpLcVatWqRmprKoEGDCj1g56X8vo+5cfjwYfU9fUFatmwZYWFhettLL71Es2bNCAsLy/YRaVG8j9nJ7QQIiqKwYMECKlasiKWlJc2bN+fo0aP5XNt/Ti4KwP379xUXFxfF29tb2bdvn/LVV18p9vb2mcYntm3bVqlWrZq6v379eqVfv37K2rVrlQMHDihffvmlUq1aNcXBwUG5fPmymq5///7KzJkzlR07dij79u1TJk+erJibmyuvvPJKodQ73f/+9z8FUL788st/VY9/W5/4+HilYsWKSvXq1ZVvvvlG2b9/v/Luu+8qJiYmygcffKCmy6/7+DypqamKr6+vAigODg7KjRs3lMjISMXExEQBlFGjRuXLeRVFUR49eqSEhIQoISEhio+Pj+Lm5qbu3759W1GUrD/XEydOKCEhIcqKFSsUQJkwYYISEhKihIeHq2ny8z7mpt67du1SevXqpaxZs0YJCwtTtmzZovTo0UMBlA0bNhR4fbKS1TjGgv5+zA/bt29XKlSooPTo0UPx9PQ0eJzn/PnzFXNzc+Wjjz5S9u/fr3Tr1k2xsbFRLl26lM81VhQJjAXo999/V3x9fRVLS0ulbNmyysSJE5WkpCS9NN7e3kqlSpXU/cjISMXHx0dxcnJSSpUqpTg5OSm9evVSzp8/r5dv3rx5Sp06dRRra2vFzMxMcXd3V2bNmpWp/IKqd7qJEycqWq1WiYmJ+df1+Lf1+fPPP5VevXoprq6uipWVlVKnTh1l2bJlSnJyspomP+9jVpYvX64ACqCsXr1aPT5hwgQFUDQajRIaGpov504foJ/VFhYWpihK1vdx0KBBWebx9vZW0+TnfcxNvc+dO6d07dpVcXV1VczNzRVXV1elQ4cOesG8IOuTlawCY0F/P+aH3EyAkJCQoNja2ipBQUHqsaSkJKVSpUrKyJEj86WeGclCxUIIIQrE4MGDOXnyJL/99lu26Q4cOICvry+nTp2ifv366vHx48ezdetWrl69mq/1lOEaQgjxgktMTDR
qBiBFUTINJdJqtVnO35wb6R3snu1QV6tWLf7++28SEhKwtLTMk3NlRQKjEEK8wBITE3G2dCCexJwT/z9ra2sePnyod2zmzJnMmjUrT+oUExODVqvFwsJC77iDgwOKohATEyOBUQghRP548uQJ8SQy26QjFpjlmD6Rp8x8uJuoqChsbW3V43nVWiwKJDAKIYTASqPFUpNzYDT5/7UnbG1t9QJjXnJwcCApKYnExES9VmNMTAwajQYHB4d8OW86GceYT5YuXUrFihUxNTXllVdeyZMyly1bxp49e/KkLCHEvzd48OBcLUxcFJlqTAze8lv6u8U//vhD7/j58+fVcY35SQJjPvjzzz+ZMGEC/fv359ChQ3z44Yd5Uq4ERiFEfjHRaAze8luLFi2wtbUlJCREPfb06VO2bt1KYGBgvp9fHqXmgz/++ANFURg2bFihzKphqPzu2SWEKFzG/IynBb2c20omGBcYHz9+rP5B/9dffxEfH8/mzZsB8Pb2pkyZMvj6+vLXX3+p0/NZWFgQFBTErFmzKFOmDJ6enqxYsYJ79+4xceJEo86fG9JizGODBw+mc+fOAFSrVg2NRsNnn33GW2+9hYeHB1ZWVlSuXJkRI0Zkmgz7u+++o1GjRlhbW2Nvb0+jRo3Ub6jKlSvz119/8dlnn6kL265evVrNu3r1aurVq4eFhQXly5dn2rRppKSk6H1do9EQGRmJn58fOp2uUCZPFqK4OXv2LIGBgZQuXRorKys8PDyyfQp05swZ2rdvj06nw87OjldffZW///5b/frrr79O69at1f27d+9iYmKiN8Xbw4cPMTMz02sxnTt3jq5du2JnZ4dOp6Njx45cunRJ79wajYYFCxYwefJkypUrR9myZQ2+zlImpgZvxrh9+zY9e/akZ8+ehIeHExUVpe6fPXsWSFub8tmJ0ydPnszMmTNZvHgxgYGBREdHs2/fvgJpbEiLMY/NmDGD2rVrM3nyZLZu3YqLiwvVqlXjvffeY+7cuZQpU4aoqCjmzp3LK6+8QlhYGJC2AsWrr75K3759mT9/Pqmpqfzyyy/ExMQAsG3bNgIDA2nVqpW6GkS1atUA+Oijj3j33XcZN24cS5Ys4dy5c2pgfHZl8H79+jF8+HCmTp2a7fygQog0nTt3xtnZmVWrVmFnZ8fFixeJjo7OMm1UVBReXl5Uq1aNtWvXkpiYyLRp0/D29ubXX3/FxsYGLy8v1q1bp3YsiYiIQKvVcurUKR48eICNjQ1HjhwhOTkZLy8vAC5fvkyLFi2oW7cuq1evxsTEhLlz5+Lr68sff/yh1yP0448/plmzZqxatSrbVTqeZaIxMbDFaFx7qnLlyuQ0j0x4eHimYxqNhqCgIIKCgow6X57I97l1XkDbtm1TAOXKlStZfv3p06fK4cOHFUD5448/FEVRlJCQEAVQ4uPjn1tupUqVMs0JGh8fr1hbW+tNnaQoivKf//xHsbS0VO7evasoiqIEBwcrgLJgwYJ/cWVCvFju3LmjAMp3332X5defneJs3Lhxik6nU+7du6ceO3funKLRaJRPPvlEURRFuXz5sgKo09GNHTtW6du3r1K6dGll7969iqIoyrRp0xR3d3e1jIEDBypVq1ZVEhIS1GO3b99WrK2tlc8++0w9Bii1a9dWUlNTDb7GuLg4BVC+sHpNWat7PcftC6vXFECJi4sz+BzFjTxKLSDffPMNDRo0wNraGjMzM1q1agXAhQsXAKhXrx6mpqb069ePnTt3ZrnmYFaOHDnCw4cP6dmzp7qeW3JyMu3atSMhISHT1EsdO3bM2wsTogQrXbo0lSpVIigoiK+//vq5LcV0hw4dom3btnprg9asWZOXXnqJw4cPA1ClShUqVKhAREQEABEREfj4+NC6dWsOHjyoHktvLQKEhobSpUsXSpUqpf6MOzg40KBBA06cOKFXh4CAgFwtcJ3eYjRkK+lK/hUWAdu2bWPgwIE0adKETZs2cfToUbZt2wakzTo
B4O7uzq5du4iLi6Nbt26UKVOGLl266L2byMrdu3cBaNiwIWZmZupWo0YNIO3RTkbOzs55fXlClFgajYbQ0FBq1arF6NGjcXNzo1GjRmpQe1ZMTEyWP2POzs7cv39f3ff29iYiIoL4+Hh++eUXvLy88PLyIiIigqSkJI4fP64XGO/evcuyZcv0fsbNzMw4dOhQnv2MF6VeqYVN3jEWgJCQEOrXr8/KlSvVY+l/GWbUoUMHOnToQHx8PN9//z3jxo1jyJAh/Pjjj88tO/0v061bt+Lm5pbp61WqVNHbz81fkkK8yNzd3QkJCeHp06ccOXKEqVOn0rlzZ65du5YpraOjI7dv3850/NatW7i7u6v7Xl5ejB8/nvDwcJycnKhZsyaPHj1i8uTJhIWFkZSUpNdBx9HRkY4dOzJq1KhMZdvY2Ojt5/ZnPL/eMRZHEhgLQEJCAubm5nrH1q1b99z0tra29OrVi2PHjrFhwwb1uLm5udrCTNe8eXOsrKyIjo6mW7dueVtxIYTKzMwMb29vpkyZQpcuXbh+/XqmNK1ateKLL74gJiZGnZ3ljz/+4Ndff2Xo0KFqOi8vLx49esRHH32ktgzr16+PpaUlCxYswM3NjcqVK6vp27Vrx2+//UaDBg0wNTWuV6ihSpmYUkqTc9mllPw5f1EigbEA+Pn5MXr0aD744AOaN2/Onj17MrUCV65cSWRkJB06dMDFxYUrV66wdu1avVXpa9WqxYEDB/jhhx9wcHCgSpUqlC5dmvfff593332X6OhofHx8MDU15fLly+zYsYMtW7ZI71MhcunXX39lwoQJ9O7dm2rVqhEXF8f8+fOpXLmy2is8o3HjxhEcHIy/vz/Tpk0jMTGR6dOnU7FiRQYPHqymq1mzJmXLluXgwYN88sknAJiamtKyZUv27t1L//799cqdPXs2jRs3pn379gwfPhxnZ2du3rzJwYMHad26NX379v3X12rorDamL0CLseRfYRHw5ptvMmHCBJYvX0737t2Jiopi/fr1emnq1avH3bt3GT9+PP7+/sycOZO+ffuyYsUKNc28efOoUKECPXr0oHHjxuzcuROACRMmEBwcTFhYGD169KBnz5588cUXNG7cOFNLVQhhuHLlylGuXDnmz59PQEAAb775Jm5uboSGhmbZcnNzc+PgwYM4ODjQv39/hg8fzksvvUR4eHimR57pLcWM7xK9vb0zHQOoXr06x48fp3Tp0owaNYr27dszZcoUHj16RL169fLkWtMH+Oe8lfzXMbJQsSiWtm/fzvXr17N855JbsbGxLFu2jF69elG7dm2j8hq6AKtGo2HRokUFMntHuvXr1zNz5kyuXr1KnTp1OH36dJ6Ue/XqVVavXs3w4cNxdXXNkzJFwYuPj8fOzo4Qh5FYmeS8Qsbj1CR6xvyHuLi4fJtEvLBJi1EUS9u3b9drTeeF2NhYZs+eze+//56n5WYUGRmZ6TFZfnr48CFDhw6lVatWhIeH88033+RZ2VevXmX27NlZvmsTxY/GwKEamhdguIa8YxQlmqIoPHnypMisFdesWbMCPd/Vq1dJSkritddeo2XLlgV6bmMlJSVhZmaGiUnJ/8VbFBncK/UFCIwl/wpFsZTd/JSDBw/m66+/5uzZs+q8sekdG9KXAdqzZw8vvfQSWq1WfRcbGRlJ27Zt1Tks+/Xrp3atv3r1qjq0pWfPnmq5V69eBdJ+aU+fPp2qVaui1WqpUKGCXmeKdOHh4TRo0ACdTkeTJk34+eef9b6u0WhYvHixuu/j40OnTp3YvHkzHh4eWFtb07Zt20xzYEZHR9OpUyesrKxwc3Nj6dKlvPPOO3o9F581a9YsPD09AfD19UWj0agrrE+ZMgVPT0+sra0pX748ffv25caNG5nK2L17Ny1btsTKygoHBwd8fHw4deoU4eHhtGnTBoDGjRur9yvdX3/9xauvvqrO69m+fXvOnDmjV3blypV56623+PDDD6lUqRKWlpZ6Y/1EwZJ
xjP+QFqMokrKbn3LGjBncuXOH8+fPq8NeypQpo+a9fv06b7/9ttobsGLFikRGRuLj40NgYCAbN27k0aNHTJ8+na5duxIZGYmLiwtbt26le/fuzJs3T/2l7+LiAkCPHj04cOAAU6dOpVmzZty5c4etW7fq1fnmzZu8/fbbTJkyBTs7O4KCgujWrRuXLl3CzOz5C8CePn2aRYsWsWDBAlJSUhg/fjwDBgwgMjISSGv1du3alVu3brFy5Urs7OxYtGgRf/31V7atqzfeeINq1aoxcOBAPvvsMxo2bEiFChWAtImdp06diqurK3fu3GHJkiV4e3vz+++/U6pU2q+FjRs30rdvX7p27cr69esxNzfnp59+4tq1a3h5efHZZ58xevRogoOD1fXzAB48eICPjw8mJiZ8/vnnWFhYMHfuXLy8vPj111/1xttu2bKFGjVq8PHHH2NqaopOp8vhO0PkFzOTUpiZ5BwSzEjJMU2xV7gz0gmRWU7zUypK5jkqMx4HlKNHj+od9/LyUlq0aKE3h+TZs2cVjUaj7N69W1EURbly5YoCKCEhIXp5Q0NDFUBZv359tvXRaDTKb7/9ph4LCwtTAOXQoUPqMUBZtGiRuu/t7a3odDrl9u3b6rH0eW2joqIURVGU3bt3K4ASERGhpnnw4IFiZ2enVKpU6bl1UhRFOXXqlAIoYWFhz02TnJysREdHK4Cyb98+RVEUJTU1ValQoYLSvn375+ZLv74TJ07oHf/4448VjUaj/P777+qxe/fuKTqdThk/frx6rFKlSkrp0qWVhw8fZnsNIn+lz5W613mCEuEyNcdtr/MEmStViIJm7PyUWeVv2rSpuv/48WN++uknevbsqS5vk5ycjLu7O25ubpnmmnzWjz/+iJWVFX369Mk2naurK3Xq1FH303u25lT/+vXr67V4n8134sQJ7O3t9WZCsba2xtfXN9tys7N3715atGiBnZ0dpUqVUluS6XP3/vHHH0RHR+sNSjfUoUOHqFu3LrVq1VKPOTo64ufnp84Xms7Hx0daiUWEzJX6j5J/haLYMXZ+ymc9O1dkTEwMKSkpjBs3LtNck3///XemuSafde/ePVxcXHKcasve3l5vP30M6bOzFRmb78aNG3qBM50xa+1ldOLECbp06YKrqyvffPMNkZGRHD16VO+c9+7dA8jVMAxD5wtNPyaKBo2picFbSSfvGEWRlN38lNbW1tnmfTaA2dvbo9FomDp1Kq+88kqm9E5OTtmWV7p0aW7cuIGiKIUy16yLiwt37tzJdDyrOTkNsW3bNuzs7Ni0aZP6jvKvv/7SS1O6dGmAXA3FcHR05I8//sh0/NatW3qrToDM3VuUaEw1aAzoEfwifGYlP/SLYi3j/JTx8fHqL+qs5o19Hp1OR/PmzTl37hyNGjXKtKX37HxeC69du3Y8fvyYTZs25d2FGaFx48bExsbqtZgfPnyY7eTy2UlISMDMzEzvF9yzc/d6eHhQoUIFgoODn1vO8+5Xq1atOHPmjF5wjImJYf/+/epya6Lo0ZQyMXgr6aTFKIocQ+anrFWrFl999RUbNmygRo0aODk5ZTt0YdGiRbRt25bevXvTp08fHBwciI6O5ocffmDIkCH4+PhQrlw57O3t2bBhA1WqVEGr1VKvXj3atWtHYGAgQ4cO5dKlSzRt2pT79++zefNmNm7cmO/3IyAggIYNG9KvXz/mz5+Pvb09H374ITY2Nrka8+fn58eyZcsYM2YM3bp1IzIyMtPA//RhJX379qVHjx4MHDgQrVZLZGQkjRs3plOnTri7u2NqaspXX31FqVKlKFWqFI0aNWLIkCEsXbqUjh07MmfOHLVXaqlSpXjnnXfy6K6IvKYxL4WJac4hQZNS8sNGyQ/9otgxZH7K119/nZ49ezJmzBgaN26sjs97nhYtWnD48GEePnzIkCFDCAwM5P3338fKyorq1asDYGJiQnBwMFeuXMHX15fGjRurLdQtW7bw9ttvs3LlSgI
CAhg/fnyOj3TzikajYceOHbz00ksMHz6cN998k44dO9KuXTvs7OyMLi8wMJCFCxeyY8cOunTpQkREBLt27cqUrnfv3uzYsYNr167Rp08f+vbty+HDh9WOOk5OTnz22WfqRNaNGzcG0pZBCg8PV+vbv39/HBwciIiIyHJpNFE0SIvxHzJXqhDF0JMnT6hduzatW7fO9nGnEDlJnys17KXZWJta5Jj+YUoibX6ZWaLnSi35bWIhSoAvvviC1NRUPDw8iImJ4T//+Q9Xr17l22+/LeyqiRJCU8qwHqcyV6oQokiwsLBgwYIF6hR1L730Ert376ZRo0aFWzFRYmhMDXtMqnkB3sBJYBSiGBg4cCADBw4s7GqIEszQMYqaF2BeGAmMQgghMNGaYlIq55BgksUCzSWNBEYhhBAG9ziVR6lCCCFeCAa/Y5RHqUIIIV4EBr9jTJXAKIQQ4gVg8KNUaTEKIYR4EUhg/IcERiGEEJiYl8LE3IBeqZqSHzZK/hUKIYTIkcZUg8Y05yWlDElT3ElgFEIIgaaUxrBHqakSGIUQQrwADH7HKL1ShRBCvBAMHK6BIWmKOQmMQgghDB/gnyKBUQghxAvA4AH+0mIUQgjxIjB4uIZS8sNGyb9CIYQQOTPVpG2GpCvhJDAKIYRAY2KCxsSAR6kGpCnuJDAKIYRIC4yGvGOUwCiEEOJFoDHRGNhilEepQgghXgTyjlFV8tvEQgghcmRiVsrgzRjnz5/Hz88PnU5HuXLlePfdd3ny5EmO+SpXroxGo8m0JSYm5vYSDSaBUbyQKleuzFtvvaXub9++nRUrVhhdztWrV9FoNGzevDkvqwfA4MGDqVu3bo7pNBoNixcv1jv27rvv4uLigomJCe+8885zr8/Qc4iSL73zjSGboWJiYmjbti1Pnjxh69atzJs3jy+++ILx48cblP/VV18lMjJSb9Nqtbm9RIPJo1TxQtq2bRsODg7q/vbt2zl58iSjRo0qxFrlTmRkJJUqVVL39+/fz6JFi1i6dClNmzbF1dWVmTNnZnl9M2bM4NGjRwVdZVEU5cOj1M8//5z4+Hi2bduGo6MjAMnJyYwaNYqpU6fi6uqabX5nZ2eaNWtm8PnyigRG8UJq0KBBYVchzzz7i+P8+fMAvP3225jk8Nd9tWrV8q1eonjRaAwcrqExvMW4d+9e2rVrpwZFgF69ejFixAhCQ0MZPHhwbqqa7+RRqihR0h8N7t27l7p162JhYcHLL7/M0aNH9dJlfJQ6ePBgvv76a86ePau+x8j4AxsZGYm/vz+2trbY2NjQtGlTfvjhB73yEhMTeeutt3BwcMDFxYWJEyeSnJycbV3Pnj1LYGAgpUuXxsrKCg8PDz788MNM6cLDw2nQoAE6nY4mTZrw888/630946NUHx8fxowZA4CpqSkajYbKlSs/9/qefZS6evVqNBoNp06dIiAgAJ1OR40aNVizZo3eORVF4f3336dcuXJYW1vTs2dP9u/fj0ajITw8PNvrFkVT+pRwhmwA8fHxeltSUlKmMs+fP0/NmjX1jtnb2+Pi4qL+AZeddevWodVqsba2JjAwkDNnzuTNxeZAAqMocW7cuMGoUaOYNGkSmzZtQqvV0r59e27fvp1l+hkzZhAYGEjVqlXV9xgzZswA4KeffsLHx4ekpCS+/PJLtmzZQteuXfn777/1ypg2bRomJiZs2rSJESNGsGTJEr788sts69m5c2diYmJYtWoVu3fvZuLEiZkea968eZO3335bvZbExES6devG06dPsyxzxYoVvPPOOwDqtezYseO51/c8/fv3x9/fn+3bt9OgQQMGDx7MuXPn1K8vX76cWbNmMXjwYLZu3Uq1atV44403si1TFG1pwzUM2wDc3Nyws7NTt/nz52cqMyYmBnt7+0zHHRwcuH//frb16dKlC59++in79+/ns88+4+LFi7Rq1YrLly/nyfVmRx6lihLn/v37hISE0LZtWwC8vb1xc3Nj6dKlWf7
wVqtWjTJlyvDXX39leiz57rvvUr16dQ4cOICpqSkA/v7+mcpo2rQpn3zyCQB+fn6EhYWxefNmRowYkWUd7969y5UrV/j444/p3LkzAG3atMnyWg4ePEidOnUA0Ol0tGnThmPHjtGqVatM6WvXrq2+b8x4Lc+7vud566231PeRLVq0YPfu3WzZsoXp06eTkpLCggULGDJkCAsWLFDvyd27d1m1apVB5YuiR2NWCo0BPU41yWlpoqKisLW1VY/ndaeY9J8ngNatW+Pv70/NmjVZvHhxrjrKGUNajKLEsbOzU4Ni+n67du04duyYUeU8fvyYo0ePMmjQIDUoPs+zwbJ27dpER0c/N33p0qWpVKkSQUFBfP31189N6+rqqgbF9HKBbMvOCxmvR6fTUalSJfWc0dHR3Lhxgy5duujl6dq1a77WSeQvjanGwEepaS1GW1tbvS2rwOjg4EBcXFym4zExMXrvHQ3h4uJCq1atMr1KyA8SGEWJU6ZMmUzHnJ2duXHjhlHlxMTEkJqammPPOSDT4yJzc/Nsx1tpNBpCQ0OpVasWo0ePxs3NjUaNGhEREZFjuUC+j+XK7nrS7+Oz97ls2bL5WieRv4x9lGqImjVrZnqXGBcXx40bNzK9eyxKJDCKEufOnTuZjt26dQsXFxejyrG3t8fExITr16/nVdX0uLu7ExISQkxMDOHh4Wi1Wjp37szDhw/z5Xx5Jf0+Pnufn/cOVxQTJiaGbwYKCAhg//79xMbGqsdCQkIwMTHJ8pVEdq5fv87hw4dp3LixUflyQwKjKHHi4uI4cOCA3v7+/ftp2rTpc/Nk1cLT6XQ0b96cNWvWkJKSkm/1NTMzw9vbmylTphAfH58vgTinFqwxKlSoQLly5dixY4fe8e3bt+dJ+aJwpE8inuNmRGAcMWIENjY2vPLKK4SGhhIcHMykSZMYMWKE3pMYX19fqlevru5v2LCB/v37s27dOsLCwli1ahVeXl6YmpoyYcKEPL3urEjnG1HiODo68vrrrzN79mzs7e1ZsGABiqKovTWzUqtWLb766is2bNhAjRo1cHJyonLlyixYsIC2bdvSrl07Ro0ahYODA//73/9wcnJi6NChua7jr7/+yoQJE+jduzfVqlUjLi6O+fPnU7ly5XwZW/i868sNU1NTgoKCeOedd3B2dqZNmzaEhYWxf/9+gBzHTooiytBZbYz4fB0cHPjxxx8ZM2YMr7zyCjY2NrzxxhvMnTtXL11KSore8KYqVapw/fp13nnnHWJjY7G3t6dt27a8//77VKlSxeDz55YERlHiuLi4sHDhQiZNmsSlS5eoU6cO+/btw9nZ+bl5Xn/9dY4fP86YMWO4d+8egwYNYvXq1bRq1Yrw8HCmT5/O4MGDMTU1pU6dOsyZM+df1bFcuXKUK1eO+fPnc+3aNezs7GjdujVr167NsaNPbjzv+nJrzJgxxMTEsGLFCj755BPatWvHokWL6N27N3Z2dnlXcVFgNGamaMxy/t4zJE1GtWrVUv9oep5nx742a9aMsLAwo86TlzSKoiiFdnYh8tjgwYM5efIkv/32W2FX5YUzY8YMlixZwr1797C0tCzs6ggDxcfHY2dnx1/rj2NrZZ1z+scPqdSvCXFxcXrDNUoSaTEKIYx27tw51q5dS4sWLTA3Nyc8PJzFixczcuRICYrFlKE9TmU9RiGEyIKVlRWRkZH85z//4cGDB5QvX55JkyYxa9aswq6ayKWM073llK6kk0epQgjxAkt/lBq1+X/Y6gx4lProIW6vNpRHqUIIIUo2jSZt4glD0pV0EhiFEEKAiSZtMyRdCSeBsYhITU3l+vXr2NjYGPRXmxCi5FIUhQcPHuDq6lpg40LlHeM/JDAWEdevX8fNza2wqyGEKEKioqKoUKFCgZxLeqX+QwJjEWFjYwPAmS/2Y2OpMyiPkpj1mnzZsaho3Iz2D+o9MPocVscsjEp/0fN/Rp+j7n0fo9LHHr9qVHpTS+OX0LFtUdG
o9Mn25kaf40GIceMzzRxtjD6HRQXjBugn3XqUc6IMUuKMSw9gUcm479vk+CdGnyM5xrg5alOeGP/zZ2puZlC6BwmPqPd2gPp7oUCYYNgkoSW/wVhwgVEGXmcv/fGpjaXOoEG2AKkmxv9gWuqM/EGzNb7Tsk5n3Dg2axsro89h+8S460ixNOyepjO1ykVgtDGuh16yrfGBEQP/aEpnbuD3UkYW1sbd26QHxrUgkp8a3+Iw9vs2OTnzavI5eWpklhTTXARGrWGBMV1BvlbRaDQGdr6RFmOemTFjRqbVyYUQQhQR0vlGVWCBMT8mRs5KQkKCzLwhhBDGksCoKrCnxYMHD6Zu3boArF69Go1Gw6lTpwgICECn01GjRg3WrFmTKd/u3btp2bIlVlZWODg44OPjw6lTp4C0iWc1Gg27d+/m1VdfxdbWlp49ewIQGxvLqFGjcHFxQavV8vLLLxMaGpqpbD8/P8qWLYutrS1Nmzbl+++/10sTGxvLsGHDKF++PBYWFri5udGnTx+9NNHR0QwYMAAnJycsLS3x8vIqkFWmhRAir2hMNGhMDdgkMOav/v374+/vz/bt22nQoAGDBw/m3Llz6tc3btxI586dKVu2LOvXr2fdunW0bNmSa9eu6ZUzfPhwqlWrxrZt25g4cSJPnjzBz8+PXbt2MXfuXL777jtq165Nx44dOXPmjJrvypUrdO7cmW+++YYtW7bQsmVLAgMD9WZ6Hz9+PLt27WLevHns27ePRYsWodX+8/4pJiaGVq1acfr0aZYvX86WLVvQ6XS0bds224Vbk5KSiI+P19uEEKKwpPdKNWQr6Qq1V+pbb73FqFGjAGjRogW7d+9my5YtTJ8+HUVRmDhxIv7+/mzbtk3NExgYmKmcLl26sHDhQnU/ODiY06dP88svv1C7dm0A2rdvz59//skHH3zApk2b1POnS01NpU2bNpw9e5YvvvgCHx8fAI4fP06/fv0YNGiQmjZji3HZsmXExsZy/PhxypYtC6Qtuunu7s7ixYv58MMPs7z2+fPnM3v2bKPulxBC5Ju0qW8MS1fCFWqL0d/fX/2/TqejUqVKREdHA/DHH38QHR1t0GKwHTt21NsPDQ3F09MTd3d3kpOT1c3Pz48TJ06o6aKjoxk0aBDly5enVKlSmJmZERoayoULF9Q0DRs2ZPXq1SxevDjLHrWhoaG0adMGR0dH9TympqZ4e3vrnetZQUFBxMXFqVtUVFSO1ymEEPlFWoz/KNQWo729vd6+ubk5iYmJANy7dw8AV1fXHMt5dgHau3fvcurUKczMMneNTl8ENjU1lS5duhAXF8f7779P9erV0el0vPfee/z9999q+uXLl+Po6MiSJUuYNGkSbm5uBAUFMXLkSPVcR48ezfJc2XU40mq1eo9khRCiUEnnG1WRHeBfunRpIG1GmJw8O67G0dGRevXqsWrVqufmuXjxIqdOnWL79u107dpVPZ6QkKCXzs7OjmXLlrFs2TLOnDnDxx9/zKhRo6hbty6tW7fG0dGRDh068MEHH2Q6hwQ+IURxIeMY/1FkA6OHhwcVKlQgODiYXr16GZW3Xbt27NmzB1dX1+e2ONMDoLn5P4Os//rrL3766Sfc3d2zzOPp6cnSpUtZtWoV586do3Xr1rRr1461a9dSq1YtdDrjBl8LIURRkd7r1JB0JV2RDYwajYbFixfTt29fevTowcCBA9FqtURGRtK4cWM6der03LwDBw5k5cqV+Pj4MHHiRNzd3YmNjeXUqVM8efKE+fPnU7NmTSpUqMCUKVNISUnh4cOHzJw5k/Lly+uV1bJlS7p160bdunUxNTVlzZo1mJub07p1ayCt1+q6devw9vZm7NixVKxYkTt37nDs2DFcXV0ZN25cvt4nIYTIE/IoVVVkAyNA7969sbKyYu7cufTp0wcLCwsaNmxIt27dss2n1Wo5cOAAs2bNYu7cudy4cQMnJycaNGig9oLVarVs3bqV0aNH07NnT9zc3Jg
+fToHDhzg5MmTalktW7ZkzZo1XLlyBRMTEzw9Pdm5cye1atUC0h75Hj16lOnTpzN58mTu3btH2bJladasWY71FEKIokImEf+HRlEU4yfDFHkufRXtGzevGbwqtvn9FKPPk2ppalT65FxMIvRrvY+MSl/v1/FGn8MkxbgO1cnHbxqV3rS5i1HpAe4sCTcqfdkJPkafI9gu517aGQ0KX5hzomdoKxo3ifiDUzeMSm9V2cGo9AC4GvmaIhe/u03uJhp3ilwsv3R94/N7qmf0IPERdT7oQVxcnMG/D3Ir/XfPvZ+vYGvAPLnxDx9Q+uUqBVK3wlKkW4xCCCEKhkZjWGvwBeh7U/IXEMk4FV1+i42NZdasWfz+++8Fcj4hhMgzJkZsJdwLcIkFJzY2ltmzZ0tgFEIUOxpTE4O3kk4epQohhEh7L2vIY1J5lFpyhIeH06BBA3Q6HU2aNNFb/UJRFBYvXoy7uztarZaqVauydOlSvfznz5+nT58+uLm5YWVlRe3atVmyZAmpqakAXL16lSpVqgDQs2dPdbDs1atXC+wahRAi19LnSjVkK+FeiBbjzZs3efvtt5kyZQp2dnYEBQXRrVs3Ll26hJmZGWPHjuXLL79k2rRpNG3alCNHjjB58mQsLS0ZMWIEANeuXcPDw4P+/ftjY2PD6dOnmTlzpjr+0cXFha1bt9K9e3fmzZtHmzZtAHBxybp3Y1JSEklJ/ywZLqtrCCEKk6JJ2wxJV9K9EIHx/v37HDx4kDp16gBpE5a3adOGY8eO4eLiwqeffsrnn3/O8OHDgbSZcx4/fszs2bMZPnw4JiYm+Pr64uvrC6S1MFu1asXjx4/59NNPmTlzJlqtlgYNGgBQo0YNmjVrlm2dZHUNIUSRIo9SVS/Eo1RXV1c1KALqUlTR0dHs378fgB49euitxNGuXTtu3ryprnqRmJjIzJkzqV69OlqtFjMzM6ZNm8aNGzd4+PCh0XWS1TWEEEWKPEpVvRAtxqxW8YC0YHf37l0URcHJySnLvFFRUVSqVInJkyfz3//+l5kzZ/Lyyy9jb2/Pjh07mDNnDomJiVhbWxtVJ1ldQwghiqYXIjBmx9HREY1Gw+HDh/UmFE/n4eEBQEhICG+++SaTJ09Wv7Z79+4Cq6cQQuQnxUSDYsAAf0PSFHcvfGBMf2947949Onfu/Nx0CQkJeoEzJSWFb7/9Vi9NxpaoEEIUK/KOUfXCB0Z3d3dGjx7Na6+9xqRJk2jatClPnz7lwoULhIWFsX37dgD8/Pz473//S+3atXFycmLFihV6vUoBypUrh729PRs2bKBKlSpotVrq1auXZUtUCCGKFAmMqhei801OPvnkE+bMmcO3335Lx44dGTBgABs3bsTb21tNs3z5cry9vRkzZgyvv/46np6eTJ06Va8cExMTgoODuXLlCr6+vjRu3NighZaFEKKwpQ/XMGQr6WR1jSIifYb721HXDZ6x3iTB+NU1km2NW11Dk4u/nYz9hlIw/jqMXV1DMTWuVprUXFy3Sapx58jFvTV9kGxUesU8///2TTGyD1lBfE+BcZ9F2jmMO4uJYtzPEoCiMaxe8fHxlCvnWqCra9y9fA1bm5zPFf8gHqeq5WV1DSGEECWcPEpVFYtHqT4+PnTq1KmwqyGEECWWotGoPVOz3V6AcYzFIjAKIYQQBaVEB8aEhITCroIQQhQPGiO2Es6owHjo0CE0Gg2XLl1Sj3Xu3BmNRsPZs2fVY3379qVjx44ATJkyBU9PT6ytrSlfvjx9+/blxo0beuX+9NNPeHl5YWdnh42NDZ6ennz99deZzr9582Y8PDywtrambdu2evW4evUqGo2G1atXM2zYMEqXLk2TJk2AtLlShw4dipOTE5aWlrRo0YKIiIhM5a9cuRIPDw+0Wi2VK1dmzpw56uoZAKtXr0aj0XDy5En8/f2xsrLCw8OD/fv3k5qayvTp03F2dsbZ2ZmgoCC9vEIIUaTJlHAqowJjkyZNsLCwUIN
Kamoqhw8f1jsGEBERgZeXFwC3b99m6tSp7N69m48//pirV6/i7e1NcnJa77r4+Hg6duyIra0tGzZsYPv27QwfPpzY2Fi9c58+fZpFixaxYMECVq9ezcWLFxkwYECmOgYFBaEoChs2bGDRokWkpKQQEBDAzp07WbhwISEhIVhbW+Pn56e39NTy5csZMWIE7du3Z+fOnQwePJhZs2bx7rvvZjrHwIED6dSpE9u2bcPV1ZXu3bszduxYoqKiWLNmDaNHj2bBggWZJgDIKCkpifj4eL1NCCEKjbQYVUb1StVqtTRp0oSIiAiGDBnCr7/+yqNHjxg6dCgHDx5k5MiRXLx4kevXr6uB8auvvlLzp6Sk0Lx5cypUqMCBAwfw9/fnwoULxMXFMX/+fDw9PYF/ZqPJKDY2llOnTlGmTBkAHj58yJAhQ4iOjqZChQpquvr16/Pll1+q+9999x3Hjx/n+++/p3379gC0b9+e6tWrM2/ePLZs2UJKSgrvv/8+ffr04ZNPPgHA39+fJ0+esGTJEoKCgihdurRa5pgxYxg5ciQA5cuXx9PTk5MnTxIZGamW/9133xESEkK/fv2yvJeyuoYQoijJr2Wnzp8/z5gxYzhy5Ag2NjYMHDiQOXPm5DjxiaIoLFy4kBUrVnDnzh3q16/P0qVLc1y5KC8Y/Y7Ry8tLbR1GRETQqFEjAgIC9I5ZWVnRqFEjAPbu3UuLFi2ws7OjVKlSahC7cOECANWqVcPW1paRI0eyadMm7ty5k+V569evrwZF0F8hI6P0R7jpDh06hK2trRoUAczMzOjevTuHDx8G0j64u3fv0rNnT728vXv35smTJxw/flzvuJ+fn/p/d3d3IHMwd3d3z3bFDFldQwhRpJhoDN8MFBMTQ9u2bXny5Albt25l3rx5fPHFF4wfPz7HvAsXLmTmzJmMGzeOXbt24eLigr+/P5cvX/43V2kQowOjt7c3ly9f5tq1a+oj09atW3Pz5k3+/PNPIiIiaNasGWZmZpw4cYIuXbrg6urKN998Q2RkJEePHgX+mU/UwcGBH374ARsbG1577TXKlSuHj48PZ86c0TtvditkZOTs7Ky3HxMTQ9myZTNdh7OzM/fv31fTZJU3fT89XVZ1Sa9HVvXLbs5UrVaLra2t3iaEEIUmHx6lfv7558THx7Nt2zbat2/P0KFD+fDDD/n888+znRUsMTGR+fPnM2HCBMaNG4evry/ffvstjo6OLF68ONeXaCijA2Pz5s0xMzMjIiKCQ4cO4eXlhaOjI3Xq1OHgwYNERETQunVrALZt24adnR2bNm2iS5cuNGvWjHLlymUqs0mTJuzdu5fY2Fh27tzJ7du3eeWVV3J1QZpnXgw7Ojpy+/btTOlu3bqFo6OjmgbIlO7WrVt6XxdCiJJK0WgM3gy1d+9e2rVrp/c7tFevXqSmphIaGvrcfEeOHCE+Pp5evXqpx8zNzenevTt79uzJ3QUawejAqNPpaNiwIStXruTevXu0atUKSGtJrlu3jitXrqjvFxMSEjAzM9MLVuvWrXtu2ZaWlgQGBjJy5EiuXLmSJ6tUtGrVivj4eL0PITk5mW3btql19/DwoEyZMoSEhOjl3bRpE+bm5mrvViGEKLGMbDE+23nw2UUVIO01Vc2aNfWO2dvb4+Liwvnz559blfSvPZu3Vq1a/P333/k+FC9X4xi9vLw4ePAgL730kvoI0MvLi/DwcMzMzGjevDmQ9i7u5s2bjBkzhh9//JE5c+ZkGoaxe/duunfvzjfffMPBgwfZuHEjy5cvp2XLllhYWPzLy0t759ikSRMGDBjAV199xe7du+nUqRM3btxQJwE3NTVlxowZbNiwgXfeeYfQ0FDef/99Fi5cyJgxY/Q63gghRMmkGLGBm5sbdnZ26jZ//vxMJcbExGR6zQRpr9CefUX1bD6tVpspBjg4OKAoivr6K7/kaq5Ub29vFi1apLYMAfX/jRo1wtLSEoDAwEAWLlzI8uXLCQ4OpmX
LluzatUvtsAJQvXp1TExMmDZtGrdv36Z06dL4+/tneZNzw9TUlD179jBx4kQmTZrEo0ePaNiwIaGhobz88stqujFjxmBmZsZHH33EihUrcHFxYdasWZlW0Mgv6XO5P3jwwOA8Jom5mEQcmUTcEEV2EvGHRk4ibiaTiBt+jqIziXj674GCXONB+f9/hqQDiIqK0usbodUa+Y1QlCmiSIiKijLmzzXZZJPtBdiioqLy/XdPXFycAig3bl1THiU8yHG7ceuaAihxcXE5ll2mTBllypQpmY67uroqkydPfm6+zz77TAGUhIQEveNffPGFotFolMePHxt/oUaQ1TWKCFdXV6KiorCxsdF7JxsfH4+bm1umv86ex9j0JeUcUqeic46iWKeCOEde1klRFB48eICrq6tB5eQFhRQMeXpjzBOemjVrZnqXGBcXx40bNzK9P3w2H8Aff/zBSy+9pB4/f/48FStWVJ9K5hcJjEWEiYmJ3kQFzzJ2SEduhoCUhHNInYrOOYpinQriHHlVJzs7O6PK+LeMfZRqiICAAObNm0dsbKz6rjEkJAQTExP8/f2fm69FixbY2toSEhKiBsanT5+ydetWAgMDDT5/bpXoScSFEEIYSEk1fDPQiBEjsLGx4ZVXXiE0NJTg4GAmTZrEiBEj9FrDvr6+VK9eXd23sLAgKCiIxYsX8/HHH3PgwAH69u3LvXv3mDhxYp5edlakxSiEEOL/X2wa0mI0nIODAz/++CNjxozhlVdewcbGhjfeeIO5c+fqpUtJSVHnz043efJkFEVh8eLF6pRw+/bto2rVqkbUIHckMBZxWq2WmTNnGtzjy9j0JeUcUqeic46iWKeCOEdB1Ck/KaSiGNCb15A0GdWqVYv9+/dnmyY8PDzTMY1GQ1BQEEFBQUadLy9oFKUA+wMLIYQoUuLj47GzsyPq5p/Y2toYkP4BbuVqEBcXV2KnspQWoxBCCFJJIdWAHqeGpCnuJDAKIYTgn+GThqQr2SQwCiGEyLd3jMWRDNcoAipXroxGo8m0GTKJ+vnz5/Hz80On01GuXDneffddnjx5UgC11jdgwABq1KiBTqfDwcEBLy+vbGfPz+j69ev06NEDGxsbHB0deeONN4iPj8/nGmdtxYoVdOrUiTJlyqDRaNi8ebPBeQv7s7h48SIjRoygfv36lCpVirp16xqUT1EUFixYoA6cbt68ubo8XEHbs2cP3t7elClTBq1WS9WqVRk/fjxxcXE55l21ahXu7u5YWFjw0ksvsWvXrgKocdZWr16d5c/0lClTss1XmJ+FYsS/kk5ajEXEq6++yoQJE/SO5dRTLX0R0Bo1arB161auXbvG+PHjefz4MZ9++ml+VjeTJ0+eMH78eGrUqEFiYiKrVq0iMDCQsLAwdRmyrDx9+lRdRHr9+vU8fvyYiRMn0q9fv0L5xbZmzRogbZ7f9P8boih8FmfPnmX37t00bdqU1NRUUlMN+8s+fUHYBQsWUK9ePT777DP8/f05ffp0gXSNz+j+/fs0bdqUt99+m9KlS/Pbb78xa9Ysfvvtt2z/0Pr2228ZNmwY06ZNo23btmzcuJFu3bpx6NChAlnx/Xm+//57vYH65cuXzzZ94X4WqRg2x2zJbzHKXKlFQKVKlZTRo0cbnW/evHmKTqdT7t27px5buXKlYmpqqly7di0vq2i05ORkxc3NTRk2bFi26davX69oNBrl/Pnz6rF9+/YpgHLs2LH8rmYmKSkpiqIoypUrVxRACQkJMShfUfgs0uuuKIoyaNAgpU6dOjnmSUhIUGxtbZWgoCD1WFJSklKpUiVl5MiR+VJPY33xxRcKkO19dHd3V/r27at3rHnz5kpAQEB+Vy9LwcHBCqDcuXPH4DyF9Vmkz5V65eYvyt3Hl3Pcrtz8xeC5UosreZRajOV2EdCCYGpqir29fY6PEvfu3Uu9evXw8PBQj/n5+eHo6FggC5I+y8Qkdz8Suf0s9u7dqz5mmzZtmnq8W7duaDQ
adDodFy5cyLe6F/aCsIZIX/bted9Lly9f5sKFC3rXANCnTx9+/PHHLNcJLIoK+7NInyvVkK2kk8BYRKxbtw6tVou1tTWBgYGcOXMmxzy5XQQ0vyiKQnJyMvfu3WPx4sX8+eefvPnmm9nmyeoaNBpNlpMPF2W5/SwCAgIYPnw4AIsWLeLMmTNs2rSJ7du3A2mP1jIu05Yf9YbCWxD2eVJSUkhMTOR///sf77//Pl26dKFy5cpZps3uGp48ecKVK1fyu7rPVadOHUxNTalatSrz588nJeX5QaWwPwtFUVCUVAM2eccoCkCXLl1o2rQpFStW5PLly8ydO5dWrVpx6tSpbN8r5HYR0PyyatUqhg0bBoC1tTUbN25UF61+nqJ2Dbn1b65jyZIl7N+/n8uXLzNkyBCioqIAaNeuHaNHj86P6qoMWRA2v1cyyEqlSpW4du0aAB06dGD9+vXPTZu+aO2z99/BwQGgUL6PXFxcmD17Nk2bNkWj0fDdd98xffp0rl279tx3zoX9WeTHJOLFlQTGAqQoit5fjBqNBlNTUz755BP1WOvWrfH396dmzZosXryYFStWFEZVn+t51wDwyiuvUL9+fe7evUtISAi9evVi27ZtBAQEFFZ1nyu76yho1tbWrFmzBi8vL37++WcgbWWF4OBgvSXIXiR79uzh0aNHnD17ljlz5tC5c2d++OGHQvuMjNW+fXu1UxmAv78/lpaWLF26lGnTpuHi4lKItXse6XyTTh6lFqCDBw9iZmambr6+vlmmc3FxoVWrVuovyedxcHDIsht7TEyM3ruuvJTdNTg5OdGoUSM6dOjAqlWrCAgIYNKkSdmWVxjXAIZ/Fob6t9fRsmVLGjVqpO536dIl22XI8oqDgwNJSUmZhgbFxMSg0WjUVldBq1evHs2bN+eNN95gx44dhIWFsW3btizTptfx2fuf3pLMz+8jY/Tq1YuUlBROnz6d5dcL+7OQ4Rr/kBZjAXr55Zc5ceKEum9jk/O8hNnJ7SKg/4Yx1/Dyyy+zd+/ebMurWbNmpvepiqLwxx9/4Ofn9+8qm42i9ll8/fXXHD9+XN1ft24dI0aMoEWLFv+qXjkp7AVhDVGvXj3MzMy4ePFill9Pv4bz58/rdeI6f/485ubmBT7kJLcK+7OQAf7/kBZjAbKxsaFRo0bqlvGHOKPr169z+PBhGjdunG15AQEB7N+/n9jYWPWYIYuA/huGXgPA4cOHc/ylFBAQwC+//MKff/6pHvvxxx+5d+9evi5Iasx1GOLffBZRUVGMHTsWSGsp1qpVi9TUVAYNGsTjx4//Vb1yknFB2HQFuSCsIY4dO8bTp0+f+71UtWpV3N3d9a4BYOPGjfj6+mJubl4Q1czRt99+i6mpKQ0aNMjy64X9WUiv1AwKbaCIUBQlbRxfv379lLVr1yoHDhxQvvzyS6VatWqKg4ODcvnyZTVdeHi4Ympqqnz99dfqsfv37ysuLi6Kt7e3sm/fPuWrr75S7O3tczUm8t/YtWuX0qtXL2XNmjVKWFiYsmXLFqVHjx4KoGzYsEFNd/XqVcXU1FSZPXu2euzJkydK3bp1FU9PT2Xnzp3Kxo0bFTc3N6Vjx44Feg3pTpw4oYSEhCgrVqxQAGXChAlKSEiIEh4erqbJy88iNTVV8fX1VQDFwcFBuXHjhhIZGamYmJgogDJq1CiD6/7o0SMlJCRECQkJUXx8fBQ3Nzd1//bt24qiKErbtm2VatWq6eWbP3++otVqlWXLlik//vij0qNHD8XGxka5dOmSwefOK926dVPmzp2r7Ny5U9m/f7+yZMkSpVy5ckq9evWUpKQkRVEUZejQoYqpqalevvTxsO+9954SFhamjBgxQilVqpRy5MiRAr8GRVEUf39/ZcGCBcru3buV3bt3K2+++aai0WiUd955R01TVD6L9HGM564fVKIf/pzjdu76wRI/jlECYyGLjIxUfHx8FCcnJ6VUqVKKk5OT0qtXL70B74qiKGFhYQq
gBAcH6x3//fffFV9fX8XS0lIpW7asMnHiRPUXSEE5d+6c0rVrV8XV1VUxNzdXXF1dlQ4dOugFE0X5Z9D8zJkz9Y5HR0cr3bt3V6ytrRV7e3tl6NChhfZDN2jQoPSZlPU2b29vNU1efhbLly9Xz7F69Wr1+IQJExRA0Wg0SmhoqEF1T7+/WW1hYWGKoiiKt7e3UqlSJb18qampyrx585QKFSooWq1Wadq0aaEFlPnz5yv169dXbGxsFJ1Op9SpU0eZMWOG3vdD+mf0rC+//FKpXr26Ym5urv6hVVjefvttpUaNGoqlpaWi1WoVT09P5eOPP1ZSU1PVNEXls0gPjGevHVD+fnA8x+3stQMlPjDKeoxCCPECS1+P8bdr+7Gx1eWY/kH8I+qWbyfrMQohhCjZUpUUUhUD1mM0IE1xJ4FRCCGEBMYMJDAKIYQgVUklVcl5KIYhaYo7CYxCCCFIVZ6QopgZlK6kk8AohBDi/1uMhjxKLfktRhngnw8GDx5s8OrpQoiiz5CfaY1Gw+LFi40uO7f58lr6O0ZDtpJOWoxCCJEHIiMjqVSpUmFXI9dSMbDzzQsw840ExhdMQkJCkZj/UoiSplmzZoVdhUySkpIwMzMzaBHrVFIMCnovQmCUR6kF4MyZM7Rv3x6dToednR2vvvoqf//9t/r1119/ndatW6v7d+/excTERG+u1IcPH2JmZqY3j+K5c+fo2rUrdnZ26HQ6OnbsyKVLl/TOrdFoWLBgAZMnT6ZcuXKULVs2H69UiJItPDycBg0aoNPpaNKkid4KOM8+ElUUhffff59y5cphbW1Nz5492b9/PxqNhvDwcL1yU1NTmTVrFs7Ozjg5OTFkyBAePXqklyY6OpoBAwbg5OSEpaWl3jJl6SpXrsxbb73Fhx9+SKVKlbC0tDR4PUpFSTF4K+kkMOazqKgovLy8uHfvHmvXruXzzz/nf//7H97e3jx48AAALy8vTpw4oS43ExERgVar5dSpU2qaI0eOkJycjJeXFwCXL1+mRYsW3L9/n9WrV7N+/Xru3LmDr68vSUlJenX4+OOPuXDhAqtWrWLt2rUFePVClBw3b97k7bffZtKkSWzatInExES6devG06dPs0y/fPlyZs2axeDBg9m6dSvVqlXjjTfeyDLtp59+yp9//snXX3/Ne++9x/r16/nggw/Ur8fExNCqVStOnz7N8uXL2bJlCzqdjrZt23L79m29srZs2cKuXbv4+OOP2bFjBzpdzrPZACSnPjF4K+nkUWo+W7p0KU+fPiU0NFRdF65BgwbUrl2b1atXM2bMGLy8vEhKSuLYsWN4e3sTERFBt27dCA0N5aeffqJDhw5ERETg7u6Os7MzALNnz8bR0ZEffvhBXfG7RYsWVK1alVWrVjFq1Ci1Do6OjmzduvWFXfRWiLxw//59Dh48SJ06dQDQ6XS0adOGY8eO0apVK720KSkpLFiwgCFDhrBgwQIgbbHiu3fvsmrVqkxlu7i4sG7dOgA6dOjA//73PzZv3qzmXbZsGbGxsRw/flx96uPr64u7uzuLFy/mww8/VMt6+vQpe/fuNTggpktVFAPHMZb8WUSlxZjPDh06RNu2bfUWS61ZsyYvvfQShw8fBqBKlSpUqFCBiIgIIK3F6OPjQ+vWrTl48KB6LL21CBAaGkqXLl0oVaoUycnJJCcn4+DgQIMGDfTWGYS0JZEkKArx77i6uqpBEaB27dpA2iPOZ0VHR3Pjxg26dOmid7xr165Zlv3s2qO1a9fWKzc0NJQ2bdrg6Oio/rybmpri7e2d6efdx8fH6KAIactOpRqwvQjLTkmLMZ/FxMRQv379TMednZ31nv2ntxTj4+P55Zdf8PLy4tGjR2zevJmkpCSOHz/OsGHD1PR3795l2bJlLFu2LFPZz64/l97KFELknr29vd5++s9Z+iuQjG7cuAFAmTJl9I4/7x1/VmVnfCVy9+5djh49iplZ5gH41apV09vP7c+7oe8PX4R3jBI
Y85mjo2OmdwAAt27dwt3dXd338vJi/PjxhIeH4+TkRM2aNXn06BGTJ08mLCyMpKQkvQ46jo6OdOzYUe+RabpnV6OX1qIQBcvFxQWAO3fu6B3P6neBIRwdHenQoYPee8d0Wq1Wbz+3P+8yV+o/JDDms1atWvHFF18QExODg4MDAH/88Qe//vorQ4cOVdOltxA/+ugj9ZFp/fr1sbS0ZMGCBbi5uVG5cmU1fbt27fjtt99o0KABpqamBXpNQojsVahQgXLlyrFjxw69x6fbt2/PVXnt2rVj7dq11KpVK1ePSQ0hgfEfEhjz2bhx4wgODsbf359p06aRmJjI9OnTqVixIoMHD1bT1axZk7Jly3Lw4EE++eQTAExNTWnZsiV79+6lf//+euXOnj2bxo0b0759e4YPH46zszM3b97k4MGDtG7dmr59+xbkZQohMjA1NSUoKIh33nkHZ2dn2rRpQ1hYGPv37wcwaFxhRuPHj2fdunV4e3szduxYKlasyJ07dzh27Biurq6MGzfuX9c5WXlKspLzH9nJSta9cEsS6XyTz9zc3Dh48CAODg7079+f4cOH89JLLxEeHp7pkWd6SzFjJxtvb+9MxwCqV6/O8ePHKV26NKNGjaJ9+/ZMmTKFR48eUa9evXy+KiFETsaMGcPMmTP56quv6NatG7///juLFi0CwM7OzqiySpcuzdGjR6lfvz6TJ0/G39+fcePGcfXqVZo2bZon9ZVxjP/QKMoL0PdWiCImPDycNm3acOLECRo1agTArFmz8Pf3p0WLFoVcO5FfZsyYwZIlS7h3716RmYEqPj4eOzs7tp99H52NRY7pHz1I5JU67xEXF4etrW0B1LDgyaNUIQpBw4YNiYyMpFatWuqx2bNnY21tLYGxhDh37hxr166lRYsWmJubEx4ezuLFixk5cmSRCYoZyTvGf0hgFKIQ2NraFsm5NUXesbKyIjIykv/85z88ePCA8uXLM2nSJGbNmlXYVcuSLFT8D3nHKF5YZ8+eJTAwkNKlS2NlZYWHh4feDCKRkZG0bdtWneO2X79+et3tr169ikaj4ZtvvmHEiBHY29tTtmxZPvroIwC+/fZbPDw8sLW1pXv37sTGxqp5w8PD0Wg0nDx5Evini/2kSZPQaDR682kmJiYyfvx4XF1dsbCwoH79+mzbtk3vWtKXRcpuLk9Im79z8eLFuLu7o9VqqVq1KkuXLtVLEx0dTa9evXB2dsbCwoIqVarkSeeOF02lSpU4cOAA9+/f5+nTp1y9epU5c+ZQqlTRbI8oBi459SK8Yyyan5AQBaBz5844OzuzatUq7OzsuHjxojrbSGRkJD4+PgQGBrJx40YePXrE9OnT6dq1K5GRkXrlTJs2jR49ehASEsL27duZMGECd+7cITw8nA8//JD4+HjGjBnDu+++yxdffJFlXSIjI2nevDljxoyhX79+wD8zq/Tv35/vv/+euXPnUrNmTdasWUOPHj3Yvn273swq6XN5TpkyBTs7O4KCgujWrRuXLl1SB4aPHTuWL7/8kmnTptG0aVOOHDnC5MmTsbS0ZMSIEQAMHDiQ69ev88knn+Ds7Mzff/+tBnBRcj1NSeZpSs49Tp+mJBdAbQqZIsQL6M6dOwqgfPfdd1l+3cvLS2nRooWSmpqqHjt79qyi0WiU3bt3K4qiKFeuXFEApVevXmqa5ORkxdnZWdHpdMrdu3fV4xMmTFDs7e3V/bCwMAVQTpw4oR4DlEWLFunV45dfflEA5fPPP9c73rx5c6Vhw4bq/qBBgxSNRqP89ttvmc5x6NAhRVEU5eLFi4pGo1FWrlypV9bkyZOVcuXKKSkpKYqiKIpOp1M++eSTLO+LKHni4uIUQFn7vwnK1gtTc9zW/m+CAihxcXGFXfV8I49SxQupdOnSVKpUiaCgIL7++mu9eSkfP37MTz/9RM+ePUlJSVHnpnR3d8fNzS3T3JQZ57k0NTWlatWq1K9fn9KlS6vH3d3diY2N5eHDh0b
V89ChQwD07NlT73jv3r05deqU3tJEOc3lmT6GrkePHuo1JScn065dO27evElUVBSQ1jFo8eLF/Oc//+HixYtG1VcUX4Y8RjW0g05xJ4FRvJA0Gg2hoaHUqlWL0aNH4+bmRqNGjYiIiCAmJoaUlBTGjRuHmZmZ3vb333+rASRdVvNcGjOvZnZiYmIwMzPTm4Qe0ubDVBRF771lTue8e/cuiqLg5OSkd03pgT39ujZu3Iivry/Tpk2jRo0a1KxZk61btxpVb1H8pCgpBm8lnQRG8cJyd3cnJCSEmJgYwsPD0Wq1dO7cGWtrazQaDdOmTePEiROZtunTpxdYHR0dHXn69CkxMTF6x2/duoVGo8kUDHMqS6PR8NNPP2V5XS+99BKQNs/nV199xd27dzl+/DgeHh707t2by5cv5+WliSImNTXV4C2/7dy5k5deegkLCwvc3d0JDg7OMU96Z7hnt9z0/pbON+KFZ2Zmhre3N1OmTKFLly7cunWL5s2bc+7cOebMmVOg9Xi2RZm+zl9ISAjDhw9Xj4eEhKi9Tw3l6+sLwL179+jcuXOO6U1MTGjcuDFz5szhu+++4+LFi1StWtXg84nipaiMYzx8+DDdunXjjTfeYNmyZRw4cIDXX38dGxsbXn311Rzzz5s3jzZt2qj7z84wZggJjOKF9OuvvzJhwgR69+5NtWrViIuLY/78+VSuXJlq1aqxaNEi2rZtS+/evenTpw8ODg5ER0fzww8/MGTIEHx8fPK8TrVq1WLHjh20bt0anU6Hh4cH9erVo3v37owfP56EhAQ8PDxYu3YtR44cYceOHUaV7+7uzujRo3nttdeYNGkSTZs25enTp1y4cIGwsDC2b99OXFwc7du357XXXsPDw4MnT56wfPly7O3tadiwYZ5fsyg6lNRUUlMNWHYqn1uMH3zwAU2bNuXzzz8HoE2bNly6dIn33nvPoMBYo0aNfz1GWAKjeCGVK1eOcuXKMX/+fK5du4adnR2tW7dm7dq1mJqa0qJFCw4fPszMmTMZMmQIT548oUKFCvj6+lK9evV8qdNnn33G2LFjCQgIICEhgbCwMHx8fFi7di1Tp05lwYIF3L9/n5o1a7J582aDWn3P+uSTT/Dw8GDlypW8//77WFtb4+HhoXbusbCwwNPTk+XLl/P3339jaWlJo0aNCA0NxcnJKa8vWRQhT1OeUiol5yWrDBnSkVtJSUmEhYXpjScG6NOnDxs2bODq1at6qwzlF5krVQghXmDpc6UuPzQQS2vzHNMnPHzCmNZriIqK0psrVavVZlob0li///47derUYe/evXTo0EE9/ueff+Lu7p7peEZXr16lSpUqODk5cf/+fUqXLk3Xrl1ZuHBhps5rOZHON0IIIUhNTTF4g7SVg+zs7NRt/vz5/7oO6Z3Mnu1Ulr6W7f3795+bV6vVMnLkSL788ksOHDjAxIkT2bRpE76+vjx9alwrVx6lCiGEMHqu1KxajFmJi4vjxo0bOZb7bzt2ubi4sGLFCnXf29ubOnXq0KlTJ7Zt20avXr0MLksCoxBCCIPHKKansbW1NWjZqZCQEIYNG5ZjunPnzqktw7i4OL2vpbckjX0kGhgYiE6n4+effzYqMMqjVCGEEP8/RtGQR6nG9Up94403UBQlx61mzZpUq1YNMzMzzp8/r1dG+n7NmjXz7HqzI4FRCCEEKalPSU7JeUtJzb9eqVqtljZt2rB582a94xs3bqRWrVpG90jdtWsXjx49onHjxkblk0epQggh/v9Ras5tpfyeEm7GjBn4+PgwatQoevXqRVhYGOvXr2fjxo166UqVKsWgQYNYtWoVABMmTMDExIRmzZphb2/P8ePHmT9/Po0aNeKVV14xqg4SGIUQQhg83Vt+TwnXqlUrtm7dyvTp01m1ahUVK1bkyy+/zDSRfkpKCikp/wTp2rVrs2LFCr744gseP35M+fLlef3115k9e7bRa2DKOEYhhHiBpY9jnLW3PRY6sxzTJz5
6yqyAfcTFxRnU+aY4khajEEKI/+9Yk/OjVEOmjSvuJDAKIYQgRUk1cLhG/q+uUdgkMAohhCAl5SnJKTm/WUtJSS6A2hQuCYxCCCFITVVITc05MBqSpriTwCiEEMLoKeFKMgmMQgghpMWYgQRGIYQQpCoGBsYXYISfBEYhhBBpgdGAoCeBUQghxAshOTkF02SNQelKOgmMQgghpMWYgQRGIYQQKKmGdax5ATqlSmAUQgiRPlwj50epMlxDCCHEC0GGa/xDAqMQQggJjBlIYBRCCIGiKBiyCuGLsFJhzmuMCCEKzODBg6lbt25hV0O8gJJTUkhONmBLkeEaQogCNGPGDB49elTY1RAvIBmu8Q8JjEIUIdWqVSvsKogXlLxj/Ic8ShUiD6Q/Ag0PD6dBgwbodDqaNGnCzz//rKZJTExk/PjxuLq6YmFhQf369dm2bVuW5aSLjY1l2LBhlC9fHgsLC9zc3OjTp49enujoaAYMGICTkxOWlpZ4eXnpnVcIQ6Smphq8lXQSGIXIIzdv3uTtt99m0qRJbNq0icTERLp168bTp08B6N+/PytXruTdd99l+/bt1K5dmx49evDdd989t8zx48eza9cu5s2bx759+1i0aBFarVb9ekxMDK1ateL06dMsX76cLVu2oNPpaNu2Lbdv3873axYlh/L/j1Jz2l6EzjfyKFWIPHL//n0OHjxInTp1ANDpdLRp04Zjx45ha2vL1q1b+fzzz3nzzTcB6NChA1evXmX27Nl06dIlyzKPHz9Ov379GDRokHosY4tx2bJlxMbGcvz4ccqWLQuAr68v7u7uLF68mA8//DC/LleUMPIo9R8SGIXII66urmpQBKhduzaQ9qjz3r17APTs2VMvT+/evRk3bhyPHj1Cp9NlKrNhw4asXr0aFxcXOnTokKnHamhoKG3atMHR0ZHk5GQATE1N8fb25sSJE3l6faJkS05JQWNAh1PplSqEMJi9vb3evrm5OZD2bjEmJgYzMzMcHR310jg7O6MoCrGxsVkGxuXLl+Po6MiSJUuYNGkSbm5uBAUFMXLkSADu3r3L0aNHMTMzy5RXOvIIY6QaOFfqC/CKUQKjEAXB0dGRp0+fEhMTg4ODg3r81q1baDSaTEE1nZ2dHcuWLWPZsmWcOXOGjz/+mFGjRlG3bl1at26No6MjHTp04IMPPsiUN+O7SCFyIo9S/yGdb4QoAK1atQIgJCRE73hISIjaizUnnp6eLF26FIBz584B0K5dO37//Xdq1apFo0aN9DZPT888vgpRkhnS8cbQsY7FnbQYhSgA9erVo3v37owfP56EhAQ8PDxYu3YtR44cYceOHc/N17JlS7p160bdunUxNTVlzZo1mJub07p1ayCt1+q6devw9vZm7NixVKxYkTt37nDs2DFcXV0ZN25cQV2iKOZSFQNbjBIYhRB5Ze3atUydOpUFCxZw//59atasyebNm+ncufNz87Rs2ZI1a9Zw5coVTExM8PT0ZOfOndSqVQuA0qVLc/ToUaZPn87kyZO5d+8eZcuWpVmzZnTr1q2gLk2UAEqqYWstvgCrTqFRXoRBKUIIIbIUHx+PnZ0d7eZUpJRFzm/XkhNT2T/9b+Li4rC1tS2AGhY8aTEKIYT4/16phqUr6SQwCiGEQElVUAx4x2hImuJOAqMQQghpMWYggVEIIYR0vslAAqMQQghZjzEDCYxFRGpqKtevX8fGxgaNRlPY1RFCFCJFUXjw4AGurq6YmBTMPCwpKaBJNixdSSeBsYi4fv06bm5uhV0NIUQREhUVRYUKFQrkXNL55h8SGIsIGxsbAC7++af6/7ySH9/Gym/38qFUKFXNPn/KfWjAn8K58NTBPM/L1KTkzy+exJM386XcpOv386Xc/KDk073Naw8SH9Fwyit5/rsgO9L55h8FFhgHDx7MyZMn+e233wrqlMVK+uNTGxubPB80my+BUfckH0qFUvk0YLiUIc+IcuGpbfEJjOa6R/lSbqJlUr6
Umx+KS2BMV5CvVRQlbTMkXUlXYIFxxowZPHqUPz+YQggh/h1ZXeMfBRYYC2ptuISEBCwtLQvkXEIIUWIYOFyDF+BRaoEtOzV48GB19fHVq1ej0Wg4deoUAQEB6HQ6atSowZo1azLl2717Ny1btsTKygoHBwd8fHw4deoUAOHh4Wg0Gnbv3s2rr76Kra2tukJ6bGwso0aNwsXFBa1Wy8svv0xoaGimsv38/Chbtiy2trY0bdqU77//Xi9NbGwsw4YNo3z58lhYWODm5kafPn300kRHRzNgwACcnJywtLTEy8uLn3/+Oc/unRBC5Lf0d4yGbCVdoa7H2L9/f/z9/dm+fTsNGjRg8ODB6jpzABs3bqRz586ULVuW9evXs27dOlq2bMm1a9f0yhk+fDjVqlVj27ZtTJw4kSdPnuDn58euXbuYO3cu3333HbVr16Zjx46cOXNGzXflyhU6d+7MN998w5YtW2jZsiWBgYGEh4eracaPH8+uXbuYN28e+/btY9GiRXoLwMbExNCqVStOnz7N8uXL2bJlCzqdjrZt23L79u3nXntSUhLx8fF6mxBCFJaUFMXgraQr1F6pb731FqNGjQKgRYsW7N69my1btjB9+nQURWHixIn4+/uzbds2NU9gYGCmcrp06cLChQvV/eDgYE6fPs0vv/xC7dq1AWjfvj1//vknH3zwAZs2bVLPny41NZU2bdpw9uxZvvjiC3x8fAA4fvw4/fr1Y9CgQWrajC3GZcuWERsby/HjxylbtiwAvr6+uLu7s3jxYj788MMsr33+/PnMnj3bqPslhBD5RWa++Uehthj9/f3V/+t0OipVqkR0dDQAf/zxB9HR0QwdOjTHcjp27Ki3HxoaiqenJ+7u7iQnJ6ubn58fJ06cUNNFR0czaNAgypcvT6lSpTAzMyM0NJQLFy6oaRo2bMjq1atZvHhxlj1qQ0NDadOmDY6Ojup5TE1N8fb21jvXs4KCgoiLi1O3qKioHK9TCCHyS6pi4KPUfG4w/vDDD/Tr149q1aqh0Wj0GjA5iYuL4/XXX8fR0REbGxteffVVbty4YXQdCrXFaG9vr7dvbm5OYmIiAPfupY2Tc3V1zbEcZ2dnvf27d+9y6tQpzMzMMqU1NTUF0lqIXbp0IS4ujvfff5/q1auj0+l47733+Pvvv9X0y5cvx9HRkSVLljBp0iTc3NwICgpi5MiR6rmOHj2a5bmy63Ck1Wr1HskKIURhKioD/L///nt++eUXvL29uX/fuDGyvXv35uzZs3z++edYWFgwbdo0AgICOHnyJKVKGR7uiuwA/9KlSwNpM8Lk5NmxPo6OjtSrV49Vq1Y9N8/Fixc5deoU27dvp2vXrurxhIQEvXR2dnYsW7aMZcuWcebMGT7++GNGjRpF3bp1ad26NY6OjnTo0IEPPvgg0zkk8AkhiovUVNAUgQH+ixYtYsmSJQAcOHDA4HyRkZHs27ePffv2qU8jPTw8qFWrFlu3bqVXr14Gl1VkA6OHhwcVKlQgODjYqAsCaNeuHXv27MHV1fW5Lc70AGhu/s8A7b/++ouffvoJd3f3LPN4enqydOlSVq1axblz52jdujXt2rVj7dq11KpVC51OZ1Q9hRCiqCgqA/xzOzfs3r17sbe3x8/PTz3m4eFB/fr12bNnT8kIjBqNhsWLF9O3b1969OjBwIED0Wq1REZG0rhxYzp16vTcvAMHDmTlypX4+PgwceJE3N3diY2N5dSpUzx58oT58+dTs2ZNKlSowJQpU0hJSeHhw4fMnDmT8uXL65XVsmVLunXrRt26dTE1NWXNmjWYm5vTunVrIK3X6rp16/D29mbs2LFUrFiRO3fucOzYMVxdXRk3bly+3ichhMgLTxNSSUk2YID/07Q0z/akL+zXQ+fPn8fDwyPTE8RatWpx/vx5o8oqsoER0p4XW1lZMXfuXPr06YOFhQUNGzakW7du2ebTarUcOHCAWbNmMXfuXG7cuIGTkxMNGjRQe8FqtVq
2bt3K6NGj6dmzJ25ubkyfPp0DBw5w8uRJtayWLVuyZs0arly5gomJCZ6enuzcuZNatWoBaY98jx49yvTp05k8eTL37t2jbNmyNGvWLMd6CiFEYTM3N6dcuXJc3mD4XLrW1taZFj2YOXMms2bNyuPaGS4mJiZTvxUABwcHo99VahTlRZj5ruiLj4/Hzs6OWzdv5vlcqfkxWWqK5mneFyrylQmm+VKu+b38mYf25w6f5XmZt27cyvMyARKT83a+2MfKEwY/CCYuLi7vfx9kITExkSdPDJ//WFGUTC2z57UY4+LiDOoZWrVqVb1XWwCVK1emU6dOfPrppznm9/Pzw9TUNNMkLW+99Vam0QY5KdItRiGEEPnPwsICCwuLfCk7JCSEYcOG5Zju3Llz1KxZM9fncXBwyHLYW0xMDI6OjkaVVajjGAtCxqno8ltsbCyzZs3i999/L5DzCSFEUffGG2+gKEqO278JigA1a9bkjz/+4NmHoOfPnze67BIfGAtSbGwss2fPlsAohBAFLCAggJiYGH788Uf12IULFzh16lSWM6ZlRx6lCiGEKDL++usvddawx48fc+nSJTZv3gzAq6++qqYrVaoUgwYNUserN2/enPbt2zN06FCWLFmiDvCvV68e3bt3N6oOL0yLMTw8nAYNGqDT6WjSpIne6heKorB48WLc3d3RarVUrVqVpUuX6uU/f/48ffr0wc3NDSsrK2rXrs2SJUtI/f/RrlevXqVKlSoA9OzZE41Gg0aj4erVqwV2jUIIUdyFhYXRs2dPevbsyZ07d/j+++/V/YxSUlJISUnRO7Zx40b8/PwYPnw4/fr1o0aNGuzZs8eoWW/gBWkx3rx5k7fffpspU6ZgZ2dHUFAQ3bp149KlS5iZmTF27Fi+/PJLpk2bRtOmTTly5AiTJ0/G0tKSESNGAHDt2jU8PDzo378/NjY2nD59mpkzZ6rjH11cXNi6dSvdu3dn3rx5tGnTBgAXF5cs65SUlERS0j892WR1DSGESOsXMnjw4BzTZTWgws7OjlWrVmU765khXojAeP/+fQ4ePEidOnWAtAnL27Rpw7Fjx3BxceHTTz/l888/Z/jw4UDazDmPHz9m9uzZDB8+HBMTE3x9ffH19QXSPpBWrVrx+PFjPv30U2bOnIlWq6VBgwYA1KhRg2bNmmVbJ1ldQwghiqYX4lGqq6urGhQBdSmq6Oho9u/fD0CPHj30VuJo164dN2/eVLv/JiYmMnPmTKpXr45Wq8XMzIxp06Zx48YNHj58aHSdZHUNIYQoml6IFmNWq3hAWrC7e/cuiqLg5OSUZd6oqCgqVarE5MmT+e9//8vMmTN5+eWXsbe3Z8eOHcyZM4fExESsra2NqlNhT58khBAiay9EYMyOo6MjGo2Gw4cPZ5p1AdImoYW0QapvvvkmkydPVr+2e/fuAqunEEKIgvHCB8b094b37t2jc+fOz02XkJCgFzhTUlL49ttv9dJkbIkKIYQonl74wOju7s7o0aN57bXXmDRpEk2bNuXp06dcuHCBsLAwtm/fDqTNw/ff//6X2rVr4+TkxIoVK/R6lQKUK1cOe3t7NmzYQJUqVdBqtdSrVy/LlqgQQoii6YXofJOTTz75hDlz5vDtt9/SsWNHBgwYwMaNG/H29lbTLF++HG9vb8aMGcPrr7+Op6cnU6dO1SvHxMSE4OBgrly5gq+vL40bNzZooWUhhBBFh6yuUUTk5+oaCvm85HYeill5Il/K/evj8Hwpt0ynWnle5i/fHMzzMgFuPjJu6R1DpSrF5/uruEhQnjI5dXuBra4h9EmLUQghhMigWARGHx8fOnXqVNjVEEII8QIoFoFRCCGEKCglOjAmJCQUdhWEEEIUM0YFxkOHDqHRaLh06ZJ6rHPnzmg0Gs6ePase69u3Lx07dgRgypQpeHp6Ym1tTfny5enbty83btzQK/enn37Cy8sLOzs7bGxs8PT05Ouvv850/s2bN+Ph4YG1tTVt27bVq8f
Vq1fRaDSsXr2aYcOGUbp0aZo0aQKkzZU6dOhQnJycsLS0pEWLFkRERGQqf+XKlXh4eKDVaqlcuTJz5sxRV88AWL16NRqNhpMnT+Lv74+VlRUeHh7s37+f1NRUpk+fjrOzM87OzgQFBenlFUIIUTwYFRibNGmChYWFGlRSU1M5fPiw3jGAiIgIvLy8ALh9+zZTp05l9+7dfPzxx1y9ehVvb2+Sk5OBtN6YHTt2xNbWlg0bNrB9+3aGDx9ObGys3rlPnz7NokWLWLBgAatXr+bixYsMGDAgUx2DgoJQFIUNGzawaNEiUlJSCAgIYOfOnSxcuJCQkBCsra3x8/PTW3pq+fLljBgxgvbt27Nz504GDx7MrFmzePfddzOdY+DAgXTq1Ilt27bh6upK9+7dGTt2LFFRUaxZs4bRo0ezYMGCTBMAZJSUlER8fLzeJoQQovAZNcBfq9XSpEkTIiIiGDJkCL/++iuPHj1i6NChHDx4kJEjR3Lx4kWuX7+uBsavvvpKzZ+SkkLz5s2pUKECBw4cwN/fnwsXLhAXF8f8+fPx9PQE/pmNJqPY2FhOnTpFmTJlAHj48CFDhgwhOjqaChUqqOnq16/Pl19+qe5/9913HD9+nO+//5727dsD0L59e6pXr868efPYsmULKSkpvP/++/Tp04dPPvkEAH9/f548ecKSJUsICgqidOnSapljxoxh5MiRAJQvXx5PT09OnjxJZGSkWv53331HSEgI/fr1y/JeyuoaQghRNBn9jtHLy0ttHUZERNCoUSMCAgL0jllZWdGoUSMA9u7dS4sWLbCzs6NUqVJqELtw4QIA1apVw9bWlpEjR7Jp0ybu3LmT5Xnr16+vBkXQXyEjo/RHuOkOHTqEra2tGhQBzMzM6N69O4cPHwbSFiG+e/dupoUwe/fuzZMnTzh+/LjecT8/P/X/7u7uQOZg7u7unu2KGbK6hhBCFE1GB0Zvb28uX77MtWvX1EemrVu35ubNm/z5559ERETQrFkzzMzMOHHiBF26dMHV1ZVvvvmGyMhIjh49Cvwzn6iDgwM//PADNjY2vPbaa5QrVw4fHx/OnDmjd97sVsjIyNnZWW8/JiaGsmXLZroOZ2dn7t+/r6bJKm/6fnq6rOqSXo+s6pfdnKlarRZbW1u9TQghROEzOjA2b94cMzMzIiIiOHToEF5eXjg6OlKnTh0OHjxIREQErVu3BmDbtm3Y2dmxadMmunTpQrNmzShXrlymMps0acLevXuJjY1l586d3L59m1deeSVXF6TRaPT2HR0duX37dqZ0t27dwtHRUU0DZEp369Ytva8LIYQo+YwOjDqdjoYNG7Jy5Uru3btHq1atgLSW5Lp167hy5Yr6fjEhIQEzMzO9YLVu3brnlm1paUlgYCAjR47kypUrebJKRatWrYiPjyc0NFQ9lpyczLZt29S6e3h4UKZMGUJCQvTybtq0CXNzc7V3qxBCiJIvV6treHl5sWjRIho2bKg+AvTy8uKzzz7DzMyM5s2bA2nv4pYtW8aYMWPo1q0bkZGRfPPNN3pl7d69m1WrVtGtWzcqVqzIzZs3Wb58OS1btsTCwuJfXl7aO8cmTZowYMAAFixYgLOzM8uXL+fGjRvqJOCmpqbMmDGDt99+m7JlyxIYGMjRo0dZuHAh77zzjl7HGyGEECVbrgKjt7c3ixYtUluGgPr/Ro0aYWlpCUBgYCALFy5k+fLlBAcH07JlS3bt2qV2WAGoXr06JiYmTJs2jdu3b1O6dGn8/f2ZP3/+v7kulampKXv27GHixIlMmjSJR48e0bBhQ0JDQ3n55ZfVdGPGjMHMzIyPPvqIFStW4OLiwqxZszKtoJFf0udyf/DgQd6XXYwmEX+Q+Chfyn2UmpRzolywePI4z8t8rDzJ8zIhbWLq/CCTiOe9xP//rGSNh8Ihq2sUEdHR0bi5uRV2NYQQRUhUVJTecDRRMCQwFhGpqalcv34dGxubTB2InhUfH4+bmxtRUVF51ps
1P8osbuUWp7rmV7nFqa75VW5RqKuiKDx48ABXV1dMTEr0zJ1FUq4epYq8Z2JiYvRfhvkxzCO/ho4Up3KLU13zq9ziVNf8Krew62pnZ5fn5xaGkT9FhBBCiAwkMAohhBAZSGAshrRaLTNnzkSr1RbpMotbucWprvlVbnGqa36VW5zqKvKHdL4RQgghMpAWoxBCCJGBBEYhhBAiAwmMQgghRAYSGEWhunjxIiNGjKB+/fqUKlWKunXrZplu1apVuLu7Y2FhwUsvvcSuXbsMKv/69ev06NEDGxsbHB0deeONN4iPj8/LS8hXDx8+pEKFCmg0Gk6ePJltWkVRWLBgARUrVsTS0pLmzZury7wVN4Z+XzyrJN0DUXgkMIpCdfbsWXbv3k316tXVxaef9e233zJs2DB69+7N3r17ad68Od26dcvxF97Tp09p3749Fy5cYP369fznP/9h37599OvXLz8uJV988MEHJCcnG5R24cKFzJw5k3HjxrFr1y5cXFzw9/fn8uXL+VzLvGfI90VWStI9EIVIEaIQpaSkqP8fNGiQUqdOnUxp3N3dlb59++oda968uRIQEJBt2evXr1c0Go1y/vx59di+ffsUQDl27Ni/rHn+O3funKLT6ZTPP/9cAZQTJ048N21CQoJia2urBAUFqceSkpKUSpUqKSNHjiyI6uYpQ74vnlXS7oEoPNJiLAb27t2LRqNBo9Ewbdo09Xi3bt3QaDTodDouXLhQiDXMvZzmgbx8+TIXLlygV69eesf79OnDjz/+SFLS81fN2Lt3L/Xq1cPDw0M95ufnh6OjI3v27Pl3FS8AY8aMYcSIEXr1f54jR44QHx+vd5/Mzc3p3r17sbjWZ+VmftCSdg9E4ZHAWAwEBAQwfPhwABYtWsSZM2fYtGkT27dvB9IeH2VcyqskOX/+PAA1a9bUO16rVi2ePHnClStXss37bD6NRkPNmjXVcouqzZs3c+bMGd577z2D0md3n/7++28SEhLyvI5FjdwDkVckMBYTS5YsoWrVqjx9+pQhQ4YwZswYANq1a8fo0aMLuXb5JyYmBgB7e3u94w4ODgDcv38/27zP5kvPm12+wvb48WPGjx/PvHnzDJ7EOiYmBq1Wm2lxbwcHBxRFUe9jSSb3QOQVCYzFhLW1NWvWrMHExISff/6Z27dvY2dnR3BwcI7LVIni5f/au/egqMo3DuDfBZaLLCK7XBIVVgyBFAdJvIAGivcVFa3MSUWdrDRLxchLpmJkFDKS5G1GE7w1golGFoMlKioKOV4Sb6hoYshFVwwFYeH5/eHs+XHYXW5CeHk+Mzvjec97l9l333Pe856IiAg4ODhg2rRprV0Vxl5KPDA+R/z8/NCrVy/hePTo0S/8S0y1M8OSkhJRuPbXv1wurzNt7XTatHWla003b95EdHQ0wsPDUVJSgvv376O0tBTAk0c3tP+uzcbGBo8fP0Z5ebkoXK1WQyKRCP34IuM+YM2FB8bnSHx8PDIzM4XjHTt24Pjx461Yo5anvV9U+57gpUuXYGpqChcXlzrT1k5HRLh8+bLOfahnRW5uLioqKqBSqWBjYwMbGxsEBQUBAAYOHIjBgwfrTadtz+XLl0Xhly5dEp7pe9FxH7DmwgPjc+LWrVuYM2cOgCczRQ8PD1RXVyMkJASPHj1q5dq1HBcXF3Tt2hWJiYmi8F27diEwMBCmpqYG044YMQJnz55FTk6OEPbHH3/g7t27GDlyZIvV+Wl4eXkhLS1N9Fm9ejUAYMOGDVi3bp3edL6+vmjbtq2onyorK7Fnz55ntq3NjfuANZtWflyENUB1dTUFBgYSALKxsaH8/HzKyMggIyMjAkCzZs1q7So22cOHDykxMZESExMpICCAOnXqJBwXFhYS0f+fR1y6dCmlpaXRhx9+SCYmJnT8+HEhnxs3bpCxsTGFh4cLYRUVFdS9e3fy9PSk5ORk2rVrF3Xq1IlUKtV/3s6nkZaWpvMc46BBg6hLly6ieF9//TW
ZmZlRTEwM/fHHHzR+/HiysrKia9eu/ddVfmoN+bt40fuAtR4eGJ8DsbGxBIAAUFxcnBA+f/58AkASiYRSU1NbsYZNl5ubK7St9ictLU2It2nTJnr11VfJ1NRUGOj05bNs2TJReF5eHo0bN45kMhm1a9eOpk+fTiUlJf9By5qPvoHR39+fnJ2dRfGqq6tp5cqV1LFjRzIzM6M+ffqIfjw8Txryd/Gi9wFrPfw+RsYYY6wGvsfIGGOM1cADI2OMMVYDD4yMMcZYDTwwMsYYYzXwwMgYY4zVwAMjY4y1kKlTp6J79+6tXQ3WSPy4BmOMtZBr167h4cOH6NGjR2tXhTUCD4yMMface/z4MaRSaZNe8Mx0cS8yxlgt2kughw4dQs+ePWFpaYnevXvj1KlTQpzy8nKEhobC0dER5ubm8PLyQlJSkt58tO7fv48ZM2agQ4cOMDc3R6dOnfDOO++I0uTl5WHSpEmwtbWFhYUF3njjDVG5AKBUKjF79mx8++23cHZ2hoWFxTP9jtHnjUlrV4Axxp5Fd+7cwSeffIKFCxfC2toaixYtQnBwMK5duwapVIp3330XKSkp+Oqrr+Du7o6tW7di/Pjx2Lt3L0aPHq03z9DQUPz222+IjIyEUqlEfn4+fvvtN+G8Wq1G//79IZPJEBsbC2tra8TGxmLQoEHIycmBvb29EPenn36Cq6srvvvuOxgbG8PS0rLF++Sl0Zr70THG2LMoJCSEJBIJnT9/XgjT7lmbnp5OZ8+eJQC0YcMGUbp+/fqRt7e3KJ9u3boJx926daPQ0FCD5S5dupSsra2poKBACCsvLycnJycKCwsTwpydnUmhUFBpaelTtZPpx5dSGWNMD0dHR3Tr1k04fu211wA8udSZnp4OAHjrrbdEaSZMmIDTp0/j4cOHevP09vZGXFwcVq1ahfPnz+ucT01NxcCBAyGXy6HRaKDRaGBsbAx/f39kZWWJ4gYEBPAssYXwwMgYY3q0a9dOdKx992d5eTnUajWkUinkcrkojoODA4gI9+/f15tnbGwsJk+ejOjoaHh6esLJyQnr168XzhcXF2Pv3r2QSqWiz7Zt23Dr1i2dsljL4HuMjDHWSHK5HJWVlVCr1bCxsRHCCwoKIJFIdAZVLWtra8TExCAmJgZ//fUXvvvuO8yaNQvdu3fHgAEDIJfLMXz4cHz55Zc6ac3MzETHEomkWdvE/o9njIwx1kj9+/cHACQmJorCExMThVWs9fH09MTq1asBABcvXgQADB48GBcuXICHhwd69eol+nh6ejZzK5ghPGNkjLFG6tGjB8aNG4fQ0FCUlZXBzc0N27dvx/Hjx7Fv3z6D6fz8/BAcHIzu3bvD2NgYW7duhampKQYMGADgyarVHTt2wN/fH3PmzIGTkxOKiopw8uRJODo6Yt68ef9VE19qPDAyxlgTbN++HYsXL0ZkZCTu3bsHd3d37N69G0FBQQbT+Pn5YevWrcjNzYWRkRE8PT2RnJwMDw8PAIBCocCJEyewZMkSLFiwAHfv3oW9vT369u2L4ODg/6pprLWXxbLnV+2l6M9yHVavXk379+//D2rUNP7+/qRSqVq7GvU6ffo0LVu2jB4+fCgK37JlCwGgoqKiZimnoKCAZDIZ/fXXX3XGW716NdX8GsvNzSUAwkcikZCjoyNNnDiRbty4IUo7ePBgioiIaJb6shcL32NkTfbFF19g586drV2NBomJicGvv/7a2tUwaN26dYiOjm7tatTrzJkzCA8Px6NHj0ThKpUKGRkZBhedNNZXX32FgICAJm/AvXLlSmRkZODo0aOIjIxERkYGRo4ciaqqKiHO4sWLsWrVKqjV6mapM3tx8KVU1mRdunRp7Sq8MLTPyLWGsrIyWFhYPFUednZ2sLOza5b6lJaWYvPmzdi2bVuT83B1dUXfvn0BAL6+vmjbti3Gjh2Ly5cvC309cOBA2NjYID4+HnPnzm2OqrMXBM8YmUHZ2dkYOXIkFAoF2rRpAzc
3N3z77bfCeX2v1Dl69Ch69uwJc3Nz9OjRAwcOHICXlxemTp2qk66ufSgBIDo6Gj4+PrC2toa9vT1GjRqFK1euNLodSqUSN2/exNq1ayGRSCCRSBAXFwcAqK6uRkREBJRKJczMzODu7o6NGzc2KN+AgACMGjUKu3fvhpubG2QyGQYNGoRr166J4t27dw/Tp08X9r709fXFkSNH9OallZeXh7fffhsODg4wNzdH586ddRZeXLx4EWPGjIG1tTUsLS2hUql0yq7txo0bQvtnzJgBhUKB3r17AwD279+PIUOGwN7eHm3btkWfPn2QkpIipI2Li8O0adMAPBkIJRIJlEqlcE4ikaC4uLhR7dZn9+7dAIARI0aIwh88eIApU6bAysoKdnZ2+Oyzz6DRaOrNDwCsrKwAAJWVlaLwt956C/Hx8Q3Kg708eGBkBgUFBUGtVmPz5s3Yv38/Pv30U4M7egBAfn4+hg8fDisrKyQkJCAsLAwzZ87E7du3deJq96EMCwtDQkICysvLERwcLPriysvLw+zZs7Fv3z5s2rQJ1dXV8PX1bfRmyUlJSXjllVfw5ptvIiMjAxkZGVCpVACAsLAwLF++HFOnTkVycjKGDh2KDz/8EN9//32D8j5z5gyioqIQGRmJuLg4XL16FZMmTRLOV1VVYcSIEUhOTsY333yDxMREyGQyDBkyROeHQE1TpkzBuXPnsGbNGqSkpCA8PFx0GfD69etCX8TFxWHnzp0oKipCYGAgHj9+XG+9Fy1aBCLCjz/+iKioKABAbm4ugoKCsG3bNvz000/w8/PDyJEjcejQIQBPLpcuWbIEAJCSkoKMjAydTbOftt0A8Pvvv8Pb2xvm5uai8OnTpyMpKQmRkZGIj4/HhQsXEBMTozeP6upqaDQaVFRU4OLFi1i+fDnc3d11fsj5+vrizJkzKCoqqq/L2MuktW9ysmdTUVERAaCff/7ZYJzaC1/CwsLI2tqaHjx4IISlp6cTAAoJCRGlq2sfSn00Gg09evSIZDIZbdy40WAdDHF2dqaPPvpIp41SqZQWLlwoCp84cSLZ2dmRRqOpM09/f3+ytLSkwsJCIUy7COXWrVtERLRv3z4CQCkpKUKciooKcnJyonHjxonyqrn4xtLSktasWWOw7ClTppCLiwuVlZUJYYWFhSSTyWjt2rUG02kXpwwfPrzOtlVVVVFlZSUNHTqUJk6cqNO+2otsaoc3tN36dO3aVef/Kjs7myQSCW3evFkI02g01Llz5zoX32g/Tk5OlJ2dbbA/fvnllzrrxF4uPGNkeikUCjg7O2PRokWIj49HXl5evWmysrIwcOBA4bIV8ORB6NrbZgF170OpdeLECQwZMgQKhQImJiZo06YNSktL67ycqt1fUvupy8mTJ1FZWal3v8uioiKhnKqqKoN5enl5ie6t1W5Heno62rZti2HDhglxpFIpxo0bh6NHjxqsm7e3N1atWoX169fj6tWrOudTU1MxevRomJiYCPWysbFBz549dfbU1Ec7Y64pLy8PISEh6NChA0xMTCCVSpGamtqky9dNbTfw5MpD7fuVWVlZICLRIwvGxsYYO3as3jy++eYbZGVlITMzE0lJSXB0dMTw4cN1rl7Y2toKZTKmxQMj00sikSA1NRUeHh746KOP0KlTJ/Tq1avOe0T6vtAAiF6Vo1XXPpQA8Pfff2Po0KGoqqrCxo0bcezYMWRlZcHe3l6Io0/tPSbrol2NWHvPSe2x9pJtly5dRHneuHGjwe1Qq9V62+/g4FDnJeFdu3YhMDAQn3/+OVxdXeHu7o49e/YI54uLixETE6PT3vT0dJ09NfWp3ebq6mqMHj0aR48exYoVK5CWloasrCyMGDGizv42pKntBp70Xe3tz/Lz8yGVSkXbr+lrh5aLiwt69eoFHx8fjB07Fj///DNu374t7DSjpS2nrKys3jaxlwevSmUGde3aFYmJiaisrMTx48exePFiBAUF4fbt25DJZDrx27dvr/d
eTWFhYaPLTklJQWlpKfbs2SMMPhqNpt4v1YbMlrS0M9nCwkJ06NBBCC8oKBCdT05OFt23c3R0bFQZ+tpfUFCgdyat1b59e/zwww/YtGkTTp06hYiICEyYMAGXL1+Gi4sL5HI5VCoVZs2apZO25ozdkNr7bF69ehWnT5/G3r17MWbMGCG8qQNGU9utTVt7E+727dsb3Ju0Iezs7GBra4vs7GxRuLYchULRoHzYy4FnjKxeUqkU/v7+WLhwIR48eIB//vlHbzwfHx8cPHgQ//77rxCWnp7epDeLl5WVQSKRiGZ9CQkJ9V4erb2/pJapqanOzKd3796QSqU6+10mJCTA3t4eXbt2BfBkT8uaeWpnhQ3Rv39/PHjwAKmpqUKYRqNBUlKSsN9mXYyMjODj44OIiAhoNBrhsurgwYNx/vx59OzZU6fNbm5uDa6flnYArNm2mzdv4tixY6J4tWfEhjxNu93c3JCbmysK8/HxAQDRYp+qqirs3bu3zry0CgoKUFxcLFw61dLO/pvSZ+zFxTNGpte5c+cwf/58TJgwAV26dEFJSQm+/vprKJVKg88vzps3D+vWrYNKpUJYWBju37+P8PBw2Nrawsiocb/BBg0aBACYNm0aPvjgA2RnZyM6OrrJD5B7eHjg4MGDOHDgAGxsbNC5c2fY2tri448/RlRUFMzNzdG3b1/8+uuv2LlzJ2JjY2FsbNyksmpSqVTo3bs3Jk2ahMjISDg4OCA2Nhb5+flYvHix3jQlJSUYNmwYJk+eDDc3N1RUVCA2Nhbt2rWDt7c3ACA8PBw+Pj4YNmwY3n//fTg4OODOnTs4fPgwBgwYgIkTJzaqnu7u7ujYsSMWLlyIqqoqlJaWYtmyZaKZNABh67K1a9di7NixaNOmjd7NrZvSbi0/Pz8kJCSIwl577TUEBwdj7ty5KC8vh1KpxLp161BRUaE3j5ycHJw4cQJEhNu3byMqKgoSiQQzZswQxfvzzz8hk8ng5eVVXxexl0lrr/5hz6aCggKaNGkSubi4kJmZGdnb29P48ePpypUrQhx9K0KPHDlCXl5eZGpqSh4eHvTLL7+QUqmkuXPn1plOrVYTANqyZYsQtnXrVnJxcSFzc3Pq27cvZWZm6qwubeiq1PPnz9OAAQPIyspKVE5VVRWtWLGCnJycSCqVkqurq85b2Q3Rt43b6dOnCQClpaUJYcXFxTR16lSSy+VkZmZG/fr1o0OHDhnMq7y8nN577z1yc3MjCwsLksvlNHToUMrMzBSluXLlCr399tukUCjIzMyMlEolTZkyRbTatzbtKszExESdc5mZmeTj40Pm5ubk6upK8fHxevt3+fLl1LFjRzIyMiJnZ2ci0r9atSHt1ufUqVMEQPS3RvTkb+Tdd98lS0tLUigUFBoaSlFRUfWuSrW1taXAwEA6fPiwTllBQUE0efLkeuvEXi4SIqJWGpPZSyAnJwfu7u744YcfEBIS0trVYc+J119/HWPGjMHSpUtbrAy1Wo1XXnkFBw4cwBtvvNFi5bDnDw+MrFktWrQIPXr0gKOjI65fv46VK1eirKwMly5d0rtghzF99u3bh5kzZyI3N1dnhWpzWbFiBQ4dOoSDBw+2SP7s+cX3GFmzqqiowIIFC1BQUAALCwsEBAQgKiqKB0XWKGPGjEFOTg5u3bqFV199tUXKkMvlWLNmTYvkzZ5vPGNkjDHGauDHNRhjjLEaeGBkjDHGauCBkTHGGKuBB0bGGGOsBh4YGWOMsRp4YGSMMcZq4IGRMcYYq4EHRsYYY6yG/wEjo22mSIff+gAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "variations_table_plot = {k:variations_table[k] for k in variations_table if k in ['time stretching', 'pitch shifting', 'noise']}\n", + "fig = benchmark.plot_variations(variations_table_plot, show_diff = True, figsize=(4.6, 4.2));\n", + "# fig.savefig(f'example_{dataset_name}_context.pdf', bbox_inches='tight')" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "name": "SUPERB - IC Task (FSC).ipynb", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.0" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/speech/new_notebook.ipynb b/examples/speech/new_notebook.ipynb deleted file mode 100644 index eec1ad7..0000000 --- a/examples/speech/new_notebook.ipynb +++ /dev/null @@ -1,504 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Speech XAI" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "from datasets import Dataset, load_dataset\n", - "from IPython.display import display\n", - "import numpy as np \n", - "import os\n", - "import pandas as pd\n", - "from pathlib import Path\n", - "from pydub import AudioSegment\n", - "import torch\n", - "from transformers import Wav2Vec2ForSequenceClassification, Wav2Vec2FeatureExtractor\n", - "\n", - "from ferret import SpeechBenchmark, AOPC_Comprehensiveness_Evaluation_Speech, AOPC_Sufficiency_Evaluation_Speech" - ] - }, - { - 
"cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "DATASET_ID = \"DynamicSuperb/IntentClassification_FluentSpeechCommands-Action\"" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "cuda:2\n" - ] - } - ], - "source": [ - "device = 'cuda:2' if torch.cuda.is_available() else 'cpu'\n", - "print(device)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Data" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Dataset({\n", - " features: ['file', 'speakerId', 'transcription', 'audio', 'label', 'instruction'],\n", - " num_rows: 200\n", - "})" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "data = load_dataset(DATASET_ID, split=\"test\")\n", - "data" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'file': 'wavs/speakers/Xygv5loxdZtrywr9/77506ae0-452b-11e9-a843-8db76f4b5e29.wav',\n", - " 'speakerId': 'Xygv5loxdZtrywr9',\n", - " 'transcription': 'Increase the temperature in the washroom',\n", - " 'audio': {'path': '77506ae0-452b-11e9-a843-8db76f4b5e29.wav',\n", - " 'array': array([0. , 0. , 0. , ..., 0.02133179, 0.01977539,\n", - " 0.01849365]),\n", - " 'sampling_rate': 16000},\n", - " 'label': 'increase',\n", - " 'instruction': 'Recognize the action behind the verbal expression. The answer could be activate, bring, change language, deactivate, decrease, or increase.'}" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "sample = data[0]\n", - "sample" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In this notebook we are using Wav2Vec2 which expects audio arrays to be in 16kHz. 
Luckly, this is the native sampling rate of our data. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Models" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Some weights of the model checkpoint at superb/wav2vec2-base-superb-ic were not used when initializing Wav2Vec2ForSequenceClassification: ['wav2vec2.encoder.pos_conv_embed.conv.weight_g', 'wav2vec2.encoder.pos_conv_embed.conv.weight_v']\n", - "- This IS expected if you are initializing Wav2Vec2ForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", - "- This IS NOT expected if you are initializing Wav2Vec2ForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", - "Some weights of Wav2Vec2ForSequenceClassification were not initialized from the model checkpoint at superb/wav2vec2-base-superb-ic and are newly initialized: ['wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original0', 'wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original1']\n", - "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" - ] - } - ], - "source": [ - "## Load model\n", - "model = Wav2Vec2ForSequenceClassification.from_pretrained(\n", - " \"superb/wav2vec2-base-superb-ic\"\n", - ")\n", - "feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(\n", - " \"superb/wav2vec2-base-superb-ic\"\n", - ")\n", - "\n", - "if torch.cuda.is_available():\n", - " model = model.to(device)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Speech-XAI: the `SpeechBenchmark` class" - ] - }, - { - "cell_type": 
"markdown", - "metadata": {}, - "source": [ - "Note: if not specified otherwise, `SpeechBenchmark` assumes English as the source language." - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "## Instantiate benchmark class\n", - "benchmark = SpeechBenchmark(model, feature_extractor, device=device)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's start from transcribing the example above using WhisperX." - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.1. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../.cache/torch/whisperx-vad-segmentation.bin`\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", - "Model was trained with torch 1.10.0+cu102, yours is 2.2.1+cu121. 
Bad things might happen unless you revert torch to 1.x.\n" - ] - }, - { - "data": { - "text/plain": [ - "(' Increase the temperature in the washroom.',\n", - " [{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438},\n", - " {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141},\n", - " {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444},\n", - " {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848},\n", - " {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953},\n", - " {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "text, word_timestamps = benchmark.transcribe(\n", - " sample[\"audio\"][\"array\"],\n", - " current_sr=sample[\"audio\"][\"sampling_rate\"],\n", - ")\n", - "text, word_timestamps" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Explain word importance" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Word importance" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "ExplanationSpeech(features=['Increase', 'the', 'temperature', 'in', 'the', 'washroom.'], scores=array([[ 0.47325948, -0.45515063, -0.10200211, -0.15734437, -0.12148061,\n", - " 0.0109534 ],\n", - " [ 0.07733697, -0.02064097, 0.34651279, -0.01588559, -0.01463729,\n", - " -0.02365428],\n", - " [-0.01432282, -0.01848161, -0.00988954, -0.00070852, -0.01123005,\n", - " 0.32860303]]), explainer='loo_speech+silence', target=[3, 4, 3], audio=)\n" - ] - } - ], - "source": [ - "explanation = benchmark.explain(\n", - " audio_path_or_array=sample[\"audio\"][\"array\"],\n", - " current_sr=sample[\"audio\"][\"sampling_rate\"],\n", - " methodology='LOO',\n", - " word_timestamps=word_timestamps\n", - ")\n", - "# display(benchmark.show_table(explanation, 
decimals=3))\n", - "print(explanation)" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "ExplanationSpeech(features=['Increase', 'the', 'temperature', 'in', 'the', 'washroom.'], scores=array([[ 0.30518979, -0.05905298, 0.02406042, 0.06312685, -0.01027066,\n", - " 0.00634839],\n", - " [-0.00192933, 0.04791304, 0.30365684, 0.01351917, -0.02577572,\n", - " 0.13388124],\n", - " [ 0.07868745, -0.02967894, 0.21510287, 0.02970933, 0.03952176,\n", - " 0.44306288]]), explainer='LIME+silence', target=[3, 4, 3], audio=)\n" - ] - } - ], - "source": [ - "explanation = benchmark.explain(\n", - " audio_path_or_array=sample[\"audio\"][\"array\"],\n", - " current_sr=sample[\"audio\"][\"sampling_rate\"], \n", - " methodology='LIME',\n", - " word_timestamps=word_timestamps\n", - ")\n", - "print(explanation)\n", - "#display(benchmark.show_table(explanation, decimals=3))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can run the same function but with no word timestamps. The class will generate them automatically." - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "int" - ] - }, - "execution_count": 25, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "type(sample[\"audio\"][\"sampling_rate\"])" - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Transcribing audio to get word level timestamps...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.1. 
To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../.cache/torch/whisperx-vad-segmentation.bin`\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", - "Model was trained with torch 1.10.0+cu102, yours is 2.2.1+cu121. Bad things might happen unless you revert torch to 1.x.\n", - "Transcribed audio with whisperX into: Increase the temperature in the washroom.\n", - "ExplanationSpeech(features=['Increase', 'the', 'temperature', 'in', 'the', 'washroom.'], scores=array([[ 2.73476301e-01, -2.75996308e-02, 2.68968859e-02,\n", - " 4.38230033e-02, -9.83693653e-03, 3.43606501e-02],\n", - " [-4.55664511e-02, 2.00727565e-04, 3.07805104e-01,\n", - " -7.30904579e-03, 8.18154319e-03, 1.45066594e-01],\n", - " [ 7.67946057e-02, -1.63121582e-02, 1.69544374e-01,\n", - " 1.03233484e-02, 6.95427995e-02, 4.02942428e-01]]), explainer='LIME+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" - ] - } - ], - "source": [ - "explanation = benchmark.explain(\n", - " audio_path_or_array=sample[\"audio\"][\"array\"],\n", - " current_sr=sample[\"audio\"][\"sampling_rate\"], \n", - " methodology='LIME',\n", - ")\n", - "print(explanation)\n", - "#display(benchmark.show_table(explanation, decimals=3))" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "(EvaluationSpeech(name='aopc_compr_speech', 
score=[0.32901989901438355, 0.4174739196896553, 0.5148161690682173], target=[3, 4, 3]),\n", - " EvaluationSpeech(name='aopc_suff', score=[0.17665663920342922, -0.009631142020225525, -0.01769007444381714], target=[3, 4, 3]))" - ] - }, - "execution_count": 30, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "aopc_compr = AOPC_Comprehensiveness_Evaluation_Speech(benchmark.model_helper)\n", - "evaluation_output_c = aopc_compr.compute_evaluation(explanation)\n", - "\n", - "aopc_suff = AOPC_Sufficiency_Evaluation_Speech(benchmark.model_helper)\n", - "evaluation_output_s = aopc_suff.compute_evaluation(explanation)\n", - "\n", - "evaluation_output_c, evaluation_output_s" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Explain paralinguistic impact" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Perturbation type: 75%|█████████████████████████████████████████████████████████████████████ | 6/8 [01:13<00:11, 5.92s/it]" - ] - } - ], - "source": [ - "explain_table = benchmark.explain(\n", - " audio_path_or_array=sample[\"audio\"][\"array\"],\n", - " current_sr=sample[\"audio\"][\"sampling_rate\"],\n", - " methodology='perturb_paraling',\n", - ")\n", - "display(benchmark.show_table(explain_table, decimals=2))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Show variation" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "perturbation_types = ['time stretching', 'pitch shifting', 'reverberation', 'noise']\n", - "variations_table = benchmark.explain_variations(\n", - " audio_path=audio_path,\n", - " perturbation_types=perturbation_types\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "variations_table_plot = {k:variations_table[k] for k in 
variations_table if k in ['time stretching', 'pitch shifting', 'noise']}\n", - "fig = benchmark.plot_variations(variations_table_plot, show_diff = True, figsize=(4.6, 4.2));\n", - "# fig.savefig(f'example_{dataset_name}_context.pdf', bbox_inches='tight')" - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "name": "SUPERB - IC Task (FSC).ipynb", - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.0" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/ferret/benchmark_speech.py b/ferret/benchmark_speech.py index 3364084..fc4ce5c 100644 --- a/ferret/benchmark_speech.py +++ b/ferret/benchmark_speech.py @@ -298,11 +298,21 @@ def show_table(self, explanations, apply_style: bool = True, decimals=4): ) def explain_variations( - self, audio_path_or_array, perturbation_types, target_class=None + self, + audio_path_or_array, + current_sr: int, + perturbation_types: List[int], + target_class=None, ): - # TODO GA: we will probably need to update to the new FerretAudio class here as well + """ + Explain the variations of the audio. + Returns the importance of each perturbation. 
+ """ + audio = FerretAudio( + audio_path_or_array=audio_path_or_array, current_sr=current_sr + ) perturbation_df_by_type = self.explainers["perturb_paraling"].explain_variations( - audio_path_or_array, perturbation_types, target_class + audio, perturbation_types, target_class ) return perturbation_df_by_type diff --git a/ferret/explainers/explanation_speech/paraling_speech_explainer.py b/ferret/explainers/explanation_speech/paraling_speech_explainer.py index ac18d34..b8fdc85 100644 --- a/ferret/explainers/explanation_speech/paraling_speech_explainer.py +++ b/ferret/explainers/explanation_speech/paraling_speech_explainer.py @@ -37,7 +37,7 @@ ENDPOINTS = { "WHITE_NOISE": "https://github.com/g8a9/ferret/raw/feat/support-speech-from-array/ferret/explainers/explanation_speech/white_noise.mp3", - "PINK_NOISE": "https://github.com/g8a9/ferret/raw/feat/support-speech-from-array/ferret/explainers/explanation_speech/pink_noise.mp3" + "PINK_NOISE": "https://github.com/g8a9/ferret/raw/feat/support-speech-from-array/ferret/explainers/explanation_speech/pink_noise.mp3", } @@ -461,16 +461,16 @@ def compute_explanation( return explanation - def explain_variations(self, audio_path, perturbation_types, target_class=None): + def explain_variations( + self, audio: FerretAudio, perturbation_types: List[int], target_class=None + ): n_labels = self.model_helper.n_labels - audio = pydub_to_np(AudioSegment.from_wav(audio_path))[0] - - original_gt = self.model_helper.get_predicted_probs(audio=audio) + audio_array = audio.array + original_gt = self.model_helper.get_predicted_probs(audio=audio_array) if target_class is None: - targets = self.model_helper.get_predicted_classes(audio=audio) - + targets = self.model_helper.get_predicted_classes(audio=audio_array) else: targets = target_class @@ -479,7 +479,7 @@ def explain_variations(self, audio_path, perturbation_types, target_class=None): perturbation_df_by_type = {} for perturbation_type in perturbation_types: perturbated_audios, 
perturbations = self.perturbe_waveform( - audio_path, perturbation_type, return_perturbations=True + audio, perturbation_type, return_perturbations=True ) if "time stretching" in perturbation_type: @@ -498,7 +498,6 @@ def explain_variations(self, audio_path, perturbation_types, target_class=None): prob_variations.append( [probs_modified[i][:, targets[i]][0] for i in range(n_labels)] ) - else: prob_variations.append([probs_modified[:, targets][0]]) From cb92e142b462a6743a1f67b83508ec6cdab397b9 Mon Sep 17 00:00:00 2001 From: Giuseppe Attanasio Date: Tue, 19 Mar 2024 23:04:29 +0000 Subject: [PATCH 10/21] update readme --- README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 525e3f1..e403c6d 100644 --- a/README.md +++ b/README.md @@ -44,23 +44,23 @@ pip install -U ferret-xai Our main dependencies are 🤗 `tranformers` and `datasets`. -If the speech XAI functionalities are needed, then +### (Optional) Install XAI Speech functionalities -``` -pip install -U ferret-xai[speech] -``` +If the speech XAI functionalities are needed, then follow these steps: -At the moment, the speech XAI-related dependencies are the only extra ones, so installing with `ferret-xai[speech]` or `ferret-xai[all]` is equivalent. +1. install the library with: `pip install -U ferret-xai[speech]` or `pip install -U ferret-xai[all]` +2. install whisperX with `pip install git+https://github.com/m-bain/whisperx.git` +3. install system-wide [ffmpeg](https://ffmpeg.org/download.html). If you have no sudo rights, you can try with `conda install conda-forge::ffmpeg` -**Important** Some of our dependencies might use the package name for `scikit-learn` and that breaks ferret installation. \ +**Troubleshoothing** + +Some of our dependencies might use the package name for `scikit-learn` and that breaks ferret installation. 
\ If your pip install command fails, try: ```bash SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL=True pip install -U ferret-xai ``` -This is hopefully a temporary situation! - ### Explain & Benchmark The code below provides a minimal example to run all the feature-attribution explainers supported by ferret and benchmark them on faithfulness metrics. From aab3360ac888044812a2f906b29a129893906ad7 Mon Sep 17 00:00:00 2001 From: Giuseppe Attanasio Date: Tue, 19 Mar 2024 23:05:19 +0000 Subject: [PATCH 11/21] update readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e403c6d..b14ae11 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,7 @@ For the default installation, which does **not** include the dependencies for th pip install -U ferret-xai ``` -Our main dependencies are 🤗 `tranformers` and `datasets`. +Our main dependencies are 🤗 `transformers` and `datasets`. ### (Optional) Install XAI Speech functionalities From 6660ac5008ba30f11d00866e9bce49b974f33eee Mon Sep 17 00:00:00 2001 From: Giuseppe Attanasio Date: Tue, 19 Mar 2024 23:06:01 +0000 Subject: [PATCH 12/21] update readme --- README.md | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index b14ae11..fbeaa46 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,14 @@ For the default installation, which does **not** include the dependencies for th pip install -U ferret-xai ``` -Our main dependencies are 🤗 `transformers` and `datasets`. +**Troubleshoothing** + +Some of our dependencies might use the package name for `scikit-learn` and that breaks ferret installation. \ +If your pip install command fails, try: + +```bash +SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL=True pip install -U ferret-xai +``` ### (Optional) Install XAI Speech functionalities @@ -52,14 +59,6 @@ If the speech XAI functionalities are needed, then follow these steps: 2. 
install whisperX with `pip install git+https://github.com/m-bain/whisperx.git` 3. install system-wide [ffmpeg](https://ffmpeg.org/download.html). If you have no sudo rights, you can try with `conda install conda-forge::ffmpeg` -**Troubleshoothing** - -Some of our dependencies might use the package name for `scikit-learn` and that breaks ferret installation. \ -If your pip install command fails, try: - -```bash -SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL=True pip install -U ferret-xai -``` ### Explain & Benchmark From 8853e0061018fa581e117a61de4e066cec7acecd Mon Sep 17 00:00:00 2001 From: Gaia Geagea Date: Wed, 20 Mar 2024 16:27:55 +0100 Subject: [PATCH 13/21] removing remove_word_np and its occurrences --- .../loo_speech_explainer.py | 7 +--- .../explanation_speech/utils_removal.py | 41 ------------------- 2 files changed, 1 insertion(+), 47 deletions(-) diff --git a/ferret/explainers/explanation_speech/loo_speech_explainer.py b/ferret/explainers/explanation_speech/loo_speech_explainer.py index 7d8e103..ae26100 100644 --- a/ferret/explainers/explanation_speech/loo_speech_explainer.py +++ b/ferret/explainers/explanation_speech/loo_speech_explainer.py @@ -5,7 +5,7 @@ from pydub import AudioSegment from IPython.display import display from .explanation_speech import ExplanationSpeech -from .utils_removal import remove_word, remove_word_np +from .utils_removal import remove_word from ...speechxai_utils import pydub_to_np, FerretAudio from logging import getLogger @@ -41,11 +41,6 @@ def remove_words( for word in word_timestamps: audio_removed = remove_word(pydub_segment, word, removal_type) - - # to use remove_word_np after implementing the numpy array version of pink noise and white noise - # audio_removed = remove_word_np(audio.array, audio.sample_rate, word, removal_type ) - # audio_no_words.append(audio_removed) - audio_no_words.append(pydub_to_np(audio_removed)[0]) if display_audio: diff --git a/ferret/explainers/explanation_speech/utils_removal.py 
b/ferret/explainers/explanation_speech/utils_removal.py index e6744f5..6329d16 100644 --- a/ferret/explainers/explanation_speech/utils_removal.py +++ b/ferret/explainers/explanation_speech/utils_removal.py @@ -87,44 +87,3 @@ def remove_word(audio, word, removal_type: str = "nothing"): audio_removed = before_word_audio + replace_word_audio + after_word_audio return audio_removed - - -def remove_word_np(audio_array, sr, word, removal_type: str = "nothing"): - """ - Remove a word from audio as an array, by replacing it with: - - nothing - - silence - - white noise - - pink noise - - Args: - audio_array (np.ndarray): audio_array - sr : sample rate of audio - word: word to remove with its start and end times - removal_type (str, optional): type of removal. Defaults to "nothing". - """ - - a, b = 100, 40 - - start = int((word["start"] * 1000 - a) * sr / 1000) - end = int((word["end"] * 1000 + b) * sr / 1000) - before_word_audio = audio_array[:start] - after_word_audio = audio_array[end:] - word_duration = (end - start) + a + b - - if removal_type == "nothing": - replace_word_audio = np.array([], dtype=audio_array.dtype) - - elif removal_type == "silence": - replace_word_audio = np.zeros(word_duration, dtype=audio_array.dtype) - - elif removal_type == "pink noise": - pass # to change the pink_noise.mp3 to a numpy array - - elif removal_type == "white noise": - pass # to change the white_noise.mp3 tp a numpy array - - audio_removed = np.concatenate( - [before_word_audio, replace_word_audio, after_word_audio] - ) - return audio_removed From 0c50e852539bfd74a94b0491ed6e234e658aca4b Mon Sep 17 00:00:00 2001 From: Gaia Geagea Date: Wed, 20 Mar 2024 17:53:12 +0100 Subject: [PATCH 14/21] changing equal width explainers to calculate the duration of the audio using the numpy array when the pydub AS is not needed --- .../equal_width/gradient_equal_width_explainer.py | 12 +++++++----- .../equal_width/lime_equal_width_explainer.py | 10 +++++----- 
.../equal_width/loo_equal_width_explainer.py | 12 +++++++----- 3 files changed, 19 insertions(+), 15 deletions(-) diff --git a/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py b/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py index f264923..604f872 100644 --- a/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py +++ b/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py @@ -4,7 +4,7 @@ import numpy as np import torch from ..explanation_speech import ExplanationSpeech -from ....speechxai_utils import pydub_to_np, FerretAudio +from ....speechxai_utils import FerretAudio class GradientEqualWidthSpeechExplainer: @@ -76,10 +76,10 @@ def compute_explanation( "Aggregation method not supported, choose between 'mean' and 'max'" ) - audio_array = audio.array + audio_np = audio.array # Predict logits/probabilities - logits_original = self.model_helper.predict([audio_array]) + logits_original = self.model_helper.predict([audio_np]) # Check if single label or multilabel scenario as for FSC n_labels = self.model_helper.n_labels @@ -104,7 +104,7 @@ def compute_explanation( for target_label, target_class in enumerate(targets): # Get gradient importance for each frame attr = self._get_gradient_importance_frame_level( - audio_array, target_class, target_label + audio_np, target_class, target_label ) old_start = 0 @@ -113,7 +113,9 @@ def compute_explanation( importances = [] a, b = 0, 0 # 50, 20 - duration_s = len(audio_array) / audio.sample_rate # finds the duration from the array + duration_s = len(audio_np) / audio.sample_rate + # no need to use the duration on the pydub version since + # the pydub audio segment is not even used here a, b = 0, 0 for e, i in enumerate(np.arange(0, duration_s, num_s_split)): diff --git a/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py 
b/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py index ac369c5..6fd377a 100644 --- a/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py +++ b/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py @@ -3,7 +3,7 @@ import numpy as np from ..lime_timeseries import LimeTimeSeriesExplainer from ..explanation_speech import ExplanationSpeech -from ....speechxai_utils import pydub_to_np, FerretAudio +from ....speechxai_utils import FerretAudio EMPTY_SPAN = "---" @@ -34,10 +34,10 @@ def compute_explanation( "Removal method not supported, choose between 'silence' and 'noise'" ) - audio_array = audio.array + audio_np = audio.array # Predict logits/probabilities - logits_original = self.model_helper.predict([audio_array]) + logits_original = self.model_helper.predict([audio_np]) # Check if single label or multilabel scenario as for FSC n_labels = self.model_helper.n_labels @@ -56,13 +56,13 @@ def compute_explanation( else: targets = [int(np.argmax(logits_original, axis=1)[0])] - audio_np = audio_array.reshape(1, -1) + # GG: removed the reshaping since it is already done in FerretAudio # Get the start and end indexes of the segments. 
These will be used to split the audio and derive LIME interpretable features sampling_rate = self.model_helper.feature_extractor.sampling_rate splits = [] - duration_s = len(audio_array) / audio.sample_rate # finds the duration from the array + duration_s = len(audio_np) / audio.sample_rate a, b = 0, 0 for e, i in enumerate(np.arange(0, duration_s, num_s_split)): diff --git a/ferret/explainers/explanation_speech/equal_width/loo_equal_width_explainer.py b/ferret/explainers/explanation_speech/equal_width/loo_equal_width_explainer.py index 1754f21..f59aa7a 100644 --- a/ferret/explainers/explanation_speech/equal_width/loo_equal_width_explainer.py +++ b/ferret/explainers/explanation_speech/equal_width/loo_equal_width_explainer.py @@ -41,7 +41,7 @@ def remove_audio_segment(audio, start_s, end_s, removal_type: str = "silence"): # display(audio_removed) elif removal_type == "pink noise": sounds_path = (os.path.join(os.path.dirname(__file__), "pink_noise.mp3"),) - replace_word_audio = AudioSegment.from_mp3(sound_path)[:word_duration] + replace_word_audio = AudioSegment.from_mp3(sounds_path)[:word_duration] audio_removed = before_word_audio + replace_word_audio + after_word_audio return audio_removed @@ -63,17 +63,19 @@ def compute_explanation( Computes the importance of each equal width audio segment in the audio. 
""" - audio_array = audio.array + ## Load audio as pydub.AudioSegment + audio_as = audio.to_pydub() + audio_np = audio.array ## Remove word audio_remove_segments = [] - duration_s = len(audio_array) / audio.sample_rate # finds the duration from the array + duration_s = len(audio_as) / 1000 for i in np.arange(0, duration_s, num_s_split): start_s = i end_s = min(i + num_s_split, duration_s) - audio_removed = remove_audio_segment(audio.to_pydub(), start_s, end_s, removal_type) + audio_removed = remove_audio_segment(audio_as, start_s, end_s, removal_type) audio_remove_segments.append(pydub_to_np(audio_removed)[0]) @@ -82,7 +84,7 @@ def compute_explanation( display(audio_removed) # Get original logits - logits_original = self.model_helper.predict([audio_array]) + logits_original = self.model_helper.predict([audio_np]) # Get logits for the modified audio by leaving out the equal width segments logits_modified = self.model_helper.predict(audio_remove_segments) From 4e157ef09ad21944e89f0fa8ead2e8d80f38ccfc Mon Sep 17 00:00:00 2001 From: emanuele-moscato Date: Thu, 21 Mar 2024 12:00:54 +0100 Subject: [PATCH 15/21] Make sure to use normalized arrays in the evaluators for speech --- ferret/evaluators/faithfulness_measures_speech.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ferret/evaluators/faithfulness_measures_speech.py b/ferret/evaluators/faithfulness_measures_speech.py index 95e178c..db1d61e 100644 --- a/ferret/evaluators/faithfulness_measures_speech.py +++ b/ferret/evaluators/faithfulness_measures_speech.py @@ -62,7 +62,7 @@ def compute_evaluation( target = explanation.target # Get audio as array. - audio_np = explanation.audio.array + audio_np = explanation.audio.normalized_array # Get prediction probability of the input sencence for the target ground_truth_probs = self.model_helper.predict([audio_np]) @@ -233,7 +233,7 @@ def compute_evaluation( target = explanation.target # Get audio as an array. 
- audio_np = explanation.audio.array + audio_np = explanation.audio.normalized_array # Get prediction probability of the input sencence for the target ground_truth_probs = self.model_helper.predict([audio_np]) From 14d9a33e395e4acb629cfb4622cae5e5c69ff0f8 Mon Sep 17 00:00:00 2001 From: emanuele-moscato Date: Thu, 21 Mar 2024 15:02:54 +0100 Subject: [PATCH 16/21] Fix notebook --- examples/speech/getting_started.ipynb | 571 ++++++++++++++++++++++---- 1 file changed, 493 insertions(+), 78 deletions(-) diff --git a/examples/speech/getting_started.ipynb b/examples/speech/getting_started.ipynb index a4e62d1..19b27ca 100644 --- a/examples/speech/getting_started.ipynb +++ b/examples/speech/getting_started.ipynb @@ -9,7 +9,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -19,9 +19,19 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/moscato/miniconda3/envs/ferret-testing/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n", + "torchvision is not available - cannot save figures\n" + ] + } + ], "source": [ "from datasets import Dataset\n", "from IPython.display import display\n", @@ -56,7 +66,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -79,9 +89,17 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "cuda:0\n" + ] + } + ], "source": [ "device_str = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n", "device = torch.device(device_str)\n", @@ -91,9 +109,21 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at superb/wav2vec2-base-superb-ic were not used when initializing Wav2Vec2ForSequenceClassification: ['wav2vec2.encoder.pos_conv_embed.conv.weight_g', 'wav2vec2.encoder.pos_conv_embed.conv.weight_v']\n", + "- This IS expected if you are initializing Wav2Vec2ForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing Wav2Vec2ForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of Wav2Vec2ForSequenceClassification were not initialized from the model checkpoint at superb/wav2vec2-base-superb-ic and are newly initialized: ['wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original0', 'wav2vec2.encoder.pos_conv_embed.conv.parametrizations.weight.original1']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" + ] + } + ], "source": [ "## Load model\n", "model = Wav2Vec2ForSequenceClassification.from_pretrained(\n", @@ -116,7 +146,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -126,9 +156,27 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "## Example\n", "# 'transcription': 'Turn up the bedroom heat.'\n", @@ -159,9 +207,132 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Transcribing audio to get word level timestamps...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.1. 
To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../../.cache/torch/whisperx-vad-segmentation.bin`\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", + "Model was trained with torch 1.10.0+cu102, yours is 2.2.1+cu121. Bad things might happen unless you revert torch to 1.x.\n", + "Transcribed audio with whisperX into: Turn up the bedroom heat.\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
 Turnupthebedroomheat.
action=increase0.2510.5450.2430.1300.021
object=heat-0.000-0.000-0.0000.0140.412
location=bedroom0.0020.0060.0820.9970.242
\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "explanation = benchmark.explain(\n", " audio_path_or_array=audio_path, \n", @@ -173,9 +344,136 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Transcribing audio to get word level timestamps...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.1. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../../.cache/torch/whisperx-vad-segmentation.bin`\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", + "Model was trained with torch 1.10.0+cu102, yours is 2.2.1+cu121. Bad things might happen unless you revert torch to 1.x.\n", + "Transcribed audio with whisperX into: Turn up the bedroom heat.\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
 Turnupthebedroomheat.
action=increase0.1550.2730.1170.2810.149
object=heat0.0550.0150.065-0.0070.211
location=bedroom-0.065-0.0050.2530.7070.036
\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "explanation = benchmark.explain(\n", " audio_path_or_array=audio_path, \n", @@ -187,9 +485,21 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "(EvaluationSpeech(name='aopc_compr_speech', score=[0.8124997764127329, 0.14093613624572754, 0.9970740624897493], target=[3, 4, 2]),\n", + " EvaluationSpeech(name='aopc_suff', score=[0.624854679661803, 0.01358117163181305, 0.10568535327911377], target=[3, 4, 2]))" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "aopc_compr = AOPC_Comprehensiveness_Evaluation_Speech(benchmark.model_helper)\n", "evaluation_output_c = aopc_compr.compute_evaluation(explanation)\n", @@ -200,60 +510,6 @@ "evaluation_output_c, evaluation_output_s" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Working with transcriptions explicitly" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`Ferret` offers an interface with ASR (automatic speech recognition) models from [`WhisperX`](https://github.com/m-bain/whisperX) in the form of the `transcribe_audio` function. This is called from within `Ferret` and there's no need to access it explicitly. Nevertheless, should the need arise, here's how to generate the word-level transcript (with time alignments for the audio part) used internally by the `SpeechBenchmark.evaluate` method." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from ferret.explainers.explanation_speech.utils_removal import transcribe_audio" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "text, words_trascript = transcribe_audio(\n", - " audio_path=audio_path,\n", - " device=device.type,\n", - " batch_size=2,\n", - " compute_type=\"float32\",\n", - " language='en'\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "explanation = benchmark.explain(\n", - " audio_path=audio_path, \n", - " methodology='LOO',\n", - " # Transcripts are passed explicitly.\n", - " words_trascript=words_trascript\n", - ")\n", - "\n", - "display(benchmark.show_table(explanation, decimals=3))" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -263,12 +519,159 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 24, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Perturbation type: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 8/8 [00:28<00:00, 3.61s/it]\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
 pitch shiftingpitch shifting downpitch shifting uptime stretchingtime stretching downtime stretching upreverberationnoise
action=increase0.080.040.130.110.190.040.630.44
object=heat0.02-0.000.040.000.000.000.000.29
location=bedroom0.220.130.330.020.030.010.340.60
\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "explain_table = benchmark.explain(\n", - " audio_path=audio_path,\n", + " audio_path_or_array=audio_path,\n", " methodology='perturb_paraling',\n", ")\n", "display(benchmark.show_table(explain_table, decimals=2))" @@ -283,22 +686,34 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 27, "metadata": {}, "outputs": [], "source": [ "perturbation_types = ['time stretching', 'pitch shifting', 'reverberation', 'noise']\n", "variations_table = benchmark.explain_variations(\n", - " audio_path=audio_path,\n", + " audio_path_or_array=audio_path,\n", + " current_sr=16e3,\n", " perturbation_types=perturbation_types\n", ")" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 28, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAcUAAAGZCAYAAAD4jSoIAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/H5lhTAAAACXBIWXMAAA9hAAAPYQGoP6dpAACZvklEQVR4nOzdd1xV9f/A8dcF4YIXZIgDFHGiKG7NLQg40NRMcZZilpnmwj0KtRylJmX5teFX86uWojhSSTIhM3HUT8tclRtHLoYLFPj8/iBOXtnIlPezx3nkPffz+ZzPORfum885n6FTSimEEEIIgUlhV0AIIYQoKiQoCiGEEP+QoCiEEEL8Q4KiEEII8Q8JikIIIcQ/JCgKIYQQ/5CgKIQQQvxDgqIQQgjxDwmKQgghxD8kKIoiZ8uWLSxbtizNfn9/f9zd3QuhRtkXExPDrFmzOHHiRJ6Wm9E1ycr58+fR6XRs3Lgx03SzZs3Cysoqt9UT4pkhQVEUORkFgLfeeot169YVQo2yLyYmhtmzZxeZoJhdr776KuHh4flWvhDFRanCroAQ2VWjRo3CrkKeevDgAZaWloVdDQAqV65M5cqVC7saQhQ6aSmKIsXf358vv/yS48ePo9Pp0Ol0+Pv7a+89fvt01apV6HQ6fv75Zzp16kTp0qWpXbs2u3fvJjk5mZkzZ1KhQgUqVKjAtGnTSE5ONjrWyZMn6dmzJzY2NhgMBrp168aZM2eyrOOCBQuoWbMmFhYWlCtXDh8fH86dO8f58+epVq0aAH5+flr9z58/r93GXLVqFa+99hply5blueeeAyAhIYHp06fj4uKCXq/Hzc3NqEWc2TUBiIyMpFOnTpQpUwZra2tatGjBd999Z1Tn+Ph43nzzTezs7HB0dGTixIkkJiZq7z95+zQiIgKdTsd3333HwIEDsba2xsXFhffffz/N9fj0009xcXGhdOnSdOzYkSNHjmjnKkRxIy1FUaS89dZb3Lhxg1OnTrF27VoAypUrl2mewYMHM2LECCZMmMCCBQt48cUXGTJkCHFxcaxevZqDBw8SGBhI/fr1GThwIABnz56ldevWuL
u7s2rVKkxMTJg7dy7e3t6cPn0avV6f7rFWr17NW2+9xZw5c2jVqhWxsbH8+OOPxMXFUadOHUJCQnjxxReZN28eHTp0AMDR0ZGrV68CMG3aNLp168ZXX32lBem+ffuyb98+AgMDcXNzY+fOnbz00kvY2dnh6+ub6TX56aef8PLyomXLlnzxxRfY2try888/c/HiRaN6z5gxg549e7Jhwwb279/PrFmzqFmzJiNGjMj02o4YMYKXX36ZzZs3s2XLFqZMmUKDBg3o0qULANu2bWPEiBG8+uqr9OnTh6NHj9K3b99MyxSiSFNCFDFDhgxR9erVy3L/ypUrFaCWLVum7Tt27JgCVMuWLY3yNm3aVL3wwgva68GDB6vq1aurBw8eaPuuX7+urKys1CeffJJh3UaNGqWaNGmS4fvnzp1TgAoODk53f5cuXYz279mzRwFq165dRvv79eunmjdvnuG5p2rdurWqW7euSkxMzLQ+fn5+Rvs9PDyUt7e39jowMFAZDAbtdXh4uALUpEmTtH3JycmqatWqatiwYdq+5s2bKy8vL6Oy33nnHQWolStXplsnIYoyuX0qir2OHTtq/3Z1dQXA29vbKI2rqyuXLl3SXoeFhdGjRw9KlSpFYmIiiYmJ2NnZ0bhxYw4fPpzhsZo0acKRI0cICAhg3759PHr0KEd17datm9HrsLAw7O3t8fLy0uqRmJio3YZMSkrKsKz79+9z4MABhgwZgqmpaabH7dSpk9HrunXrEhUVlWV9H8+n0+lwc3PT8iUlJXHkyBF69OhhlKdnz55ZlitEUSVBURR7tra22r/Nzc3T7EvdHx8fr72+efMmQUFBmJmZGW0//vijUfB8kr+/P0uWLGHXrl20a9eOcuXKMXbsWB48eJCtulaoUMHo9c2bN7l9+3aaerz66qskJiZqt13TEx0dTXJyMk5OTlkeN6vrkZt8N27cIDExMc3t7fLly2dZrhBFlTxTFCWSvb093bp1Y+TIkWnes7a2zjCfiYkJY8eOZezYsVy+fJmvv/6aqVOn4uDgwFtvvZXlcXU6XZp6lCtXjp07d6abPrMAY2tri4mJCVeuXMnyuPmhXLlylCpVihs3bhjtv379eqHUR4i8IEFRFDnZbcU8DR8fH37//XcaN26c5a3HjFSqVIkJEyawbt06Tp48CfzbUs1u/X18fHj//fcxNzenQYMGGaZL75oYDAZatWrF6tWrmTBhQq7PI7dMTU1p3LgxW7duZezYsdr+LVu2FGg9hMhLEhRFkePm5sZ///tfvvrqK2rVqoWDgwNVq1bN02PMnj2b5s2b07lzZ4YPH06FChW4du0aP/zwA+3atWPAgAHp5nv99dexs7OjZcuW2NnZ8dNPP/Hrr79qLc6KFStia2vLV199RbVq1dDr9ZkGu44dO9K9e3e6dOnC5MmTadCgAffu3eP48eP89ddffPHFF5lekwULFuDl5YWPjw8jR47Ezs6O//u//8PBwYFXXnklT69ZembOnEnPnj157bXX8PPz48iRI3z55ZdASqtaiOJGfmpFkTNs2DD8/PwYPXo0zZs3Z9asWXl+jJo1a3Lo0CHKli3LyJEj6dy5M1OnTuXevXuZBrHWrVuzb98+hg0bRpcuXVi7di1Llixh2LBhQEogWLlyJefOncPb25vmzZtneXtz48aNjBgxgmXLluHr68uwYcMICwvDw8NDS5PRNWnbtq02ptDf358XX3yRzZs34+Li8vQXKRt69OjBf/7zH3bt2kXPnj0JDQ3lP//5DwA2NjYFUgch8pJOKaUKuxJCiGfHihUrePXVVzl37lyet/CFyG9y+1QIkWu3b99m9uzZeHl5YW1tzeHDh5k7dy49e/aUgCiKJQmKQohcMzMz48yZM6xbt46YmBjKlSvHyy+/zHvvvVfYVRMiV+T2qRBCCPEP6WgjhBBC/EOCohBCCPEPeaaYj5KTk7ly5QrW1tZpZjIRQognKaW4c+cOTk5OMs6zkEhQzE
dXrlzB2dm5sKshhChmLl26JIs+FxIJivkodQ7NDQ4jKW2S/vp8GWm47OVcHdOySu4GTKtyFrnKB6BLzl1frSTLXP74lYA/oE3vJmadKB1JpXN3TXP7GRY0ZVrwd1x0Sbm7Nrmp6507d6jpWivT+XdF/nqqoOjv78/PP//M77//nlf1eaak3jItbaLHkMOgWKa0VdaJ0mFplbtfJlXGMlf5IPdfGrn9Ai8RQVGXy6BokKCY1woyKGrHlMctheapguJbb73FvXv38qouQgghRKF6qqBYo0aNvKpHph48eIClZe5bMkIIIUR2PNWNKH9/f9zd3QFYtWoVOp2OI0eO4Ovri8FgoFatWqxevTpNvh07dtCmTRtKly6NnZ0dnp6eHDlyBECb3HjHjh306dOHMmXK4OfnB0BMTAwjR47E0dERvV5P06ZNCQsLS1N2x44dKV++PGXKlKFFixZ8++23RmliYmJ47bXXqFSpEhYWFjg7O9O/f3+jNFFRUbz00ks4ODhgaWlJ+/bt+eWXX57mcgkhhCji8vzpzKBBg+jUqRNbtmyhcePG+Pv7a2vNAaxfv57u3btTvnx51q1bx9q1a2nTpg2XL182Kmf48OHUqFGDzZs3M3HiRB4+fEjHjh3Zvn07c+fOZdu2bdStW5du3bpx7NgxLd+5c+fo3r07//vf/9i0aRNt2rSha9euREREaGkCAgLYvn078+bNY9euXSxcuBC9/t9nftHR0bRt25ajR4+ydOlSNm3ahMFgwMvLK9MFVBMSEoiLizPahBBCFB953vv0zTff1NaWa926NTt27GDTpk3MnDkTpRQTJ06kU6dObN68WcvTtWvXNOX06NHDaP7ElStXcvToUX799Vfq1q0LQOfOnfnzzz9555132LBhg3b8VMnJyXTo0IHjx4/z2Wef4enpCcChQ4cYOHAgQ4YM0dI+3lIMCgoiJiaGQ4cOaSufe3t74+rqyqJFi3j//ffTPff58+cze/bsHF0vIYQQRUeetxQ7deqk/dtgMODi4kJUVBQAp0+fJioqKluLn3br1s3odVhYGPXr18fV1ZXExERt69ixI4cPH9bSRUVFMWTIECpVqkSpUqUwMzMjLCyMP/74Q0vTpEkTVq1axaJFi9LtORsWFkaHDh2wt7fXjmNqaoqHh4fRsZ40bdo0YmNjte3SpUtZnqcQQoiiI89bira2tkavzc3NiY+PB+DWrVsAODk5ZVlOhQoVjF7fvHmTI0eOYGZmliatqakpkNIy7NGjB7GxscyZM4eaNWtiMBh4++23uXjxopZ+6dKl2Nvbs3jxYiZNmoSzszPTpk3jjTfe0I514MCBdI+VWecivV5vdBtWCCFE8VKgg/fLli0LkOVK5JB2nI69vT0NGjRgxYoVGeb566+/OHLkCFu2bKFnz57a/gcPHhils7GxISgoiKCgII4dO8aHH37IyJEjcXd3p127dtjb29OlSxfeeeedNMeQoCeEEM+uAg2KtWvXpnLlyqxcuZK+ffvmKK+Pjw87d+7Eyckpw5ZmavAzNzfX9l24cIGffvoJV1fXdPPUr1+fJUuWsGLFCk6ePEm7du3w8fFhzZo1uLm5YTAYclRPIYQQxVeBBkWdTseiRYsYMGAAvXv3ZvDgwej1eiIjI2nevDnPP/98hnkHDx7Mp59+iqenJxMnTsTV1ZWYmBiOHDnCw4cPmT9/PnXq1KFy5cpMnTqVpKQk7t69S2BgIJUqVTIqq02bNvTq1Qt3d3dMTU1ZvXo15ubmtGvXDkjpnbp27Vo8PDwYO3YsVapU4caNGxw8eBAnJyfGjx+fr9dJCCFE4SjwuU/79etH6dKlmTt3Lv3798fCwoImTZrQq1evTPPp9Xr27NnDrFmzmDt3LlevXsXBwYHGjRtrvV31ej0hISGMGjUKPz8/nJ2dmTlzJnv27OHnn3/WymrTpg2rV6/m3LlzmJiYUL9+fb755hvc3NyAlNu8Bw4cYObMmUyZMoVbt25Rvn
x5WrZsmWU9hRBCFF86pVTxmPSwGIqLi8PGxoa/r12jTJkyOcqb2w8ltzMmloQfgoK+NiVh9kq5NnkrLi6OChUrEhsbm+PvDJE3SsDUykIIIUT2FNug+PgUc/ktJiaGWbNmceLEiQI5nhBCiMJRbINiQYqJiWH27NkSFIUQ4hknQVEIIYT4R7EPihERETRu3BiDwcBzzz1ntJKFUopFixbh6uqKXq+nevXqLFmyxCj/qVOn6N+/P87OzpQuXZq6deuyePFikpOTATh//jzVqlUDwM/PD51Oh06n4/z58wV2jkIIIQpGgQ/JyEvXrl1jzJgxTJ06FRsbG6ZNm0avXr04c+YMZmZmjB07li+++IIZM2bQokUL9u/fz5QpU7C0tGTEiBEAXL58mdq1azNo0CCsra05evQogYGB2hhHR0dHQkJCePHFF5k3bx4dOnQAwNHRMU19EhISSEhI0F7LKhlCCFG8FOugePv2bX744Qfq1asHpExA3qFDBw4ePIijoyMff/wxy5cvZ/jw4UDKrDj3799n9uzZDB8+HBMTE7y9vfH29gZSWpZt27bl/v37fPzxxwQGBqLX62ncuDEAtWrVomXLlhnWR1bJEEKI4q1Y3z51cnLSAiKgLSkVFRXF7t27Aejdu7fRqho+Pj5cu3ZNW8EiPj6ewMBAatasiV6vx8zMjBkzZnD16lXu3r2bo/rIKhlCCFG8FeuWYnorckBKoLt58yZKKRwcHNLNe+nSJVxcXJgyZQqff/45gYGBNG3aFFtbW7Zu3cq7775LfHw8VlZW2a6PrJIhhBDFW7EOipmxt7dHp9Oxb98+ownCU9WuXRuA4OBgXn/9daZMmaK9t2PHjgKrpxBCiKLjmQ2Kqc8Jb926Rffu3TNM9+DBA6OgmZSUxNdff22U5vEWqBBCiGfXMxsUXV1dGTVqFC+//DKTJk2iRYsWPHr0iD/++IPw8HC2bNkCQMeOHfn888+pW7cuDg4OLFu2zKgHKUDFihWxtbXlq6++olq1auj1eho0aJBuC1QIIUTxVaw72mTlo48+4t133+Xrr7+mW7duvPTSS6xfvx4PDw8tzdKlS/Hw8GD06NEMGzaM+vXrM336dKNyTExMWLlyJefOncPb25vmzZtna6FkIYQQxYuskpGPUlfJuPlXFGWsczbjfZIhd414kwdJucqXbDDNVb6nISssZKK4/FYWpw8jKZcX1TSXJ5mLw8XFxVHBUVbJKEzPdEtRCCGEyIkcBcX8XpmiUaNG+Pv751v5QgghRGakpSiEEEL8o1gFRaVUmp6hQgghRF7JVVAMDQ3F3d0dCwsLmjZtyoEDB4zeX7VqFQ0aNMDCwoJKlSoxY8YMkpKMO4Ds37+fpk2bYmFhgbu7O6GhoWmOk3q7dufOnTRs2BC9Xs8333wDQEhICI0aNcLCwgInJycCAgLSjCO8cOECffr0wcbGBoPBQOfOnTl27JhRmqpVq/Lmm28SFBSEs7Mz1tbW+Pv7k5CQwNGjR2nTpo22AseTeYUQQjxbctzF8erVq4wcOZJZs2ZhZ2fHggUL6Ny5M3/++Sfly5fngw8+YPLkyYwfP57Fixdz8uRJLSguWLAASFndonPnztSvX58NGzYQHR3NG2+8wb1792jUqJHR8a5cucKYMWOYOXMmVapUoUqVKmzbto0+ffrQv39/FixYwKlTp5g+fToXL15k48aNANy5cwdPT09MTExYvnw5FhYWzJ07l/bt2/Pbb7/h7OysHWPr1q24u7vz6aefcvbsWQICAjA3NycyMpKAgAAqVKjAlClT8PPz48SJE5iYpP+3hKySIYQQxVuOg+Lt27cJDg7Gy8sLAA8PD5ydnVmyZAnTp08nMDCQyZMnM2/ePCBlcLy5uTkBAQFMmjSJsmXLEhQUhE6nIzQ0FBsbGwCcnZ21WWgeFx0dTWhoKC1atND29e3bl5YtW7Ju3ToAunTpQunSpXn99dc5duwY9evXZ+XKlVy4cIHjx4/j5u
am1bVKlSoEBQWxePFio+Ns3bpVG4wfERHB559/TmhoKF26dAEgOTmZ7t27c+zYMRo2bJjutZFVMoQQonjL8e1TGxsbLSCmvvbx8eHgwYPs37+fu3fv4ufnl2ZligcPHvD7778DcPDgQTp06KAFRAAvLy/s7e3THK9s2bJGAfHu3bscPXqUPn36GKXr168fAPv27QPgxx9/xN3dXQuIkDIfaseOHbU0qTw8PIxmp3F1dcXExMToPF1dXQEyXflCVskQQojiLcctxXLlyqXZV6FCBU6ePMnNmzcBaNKkSbp5U4PE1atXqVmzZpr3y5cvn27Zj4uJiUEplWa/jY0Ner2e27dvAyktzCfTpJaXGpxTpbfahqWlpVGgzM78p7JKhhBCFG85Doo3btxIs+/vv//G0dFRa+mFhIQYPbNLVa1aNSBl1frr16+neT+9fTqd8WwStra26HS6NGljY2NJSEjQ6mBvb8/p06fTrWt6LVIhhBAix7dPY2Nj2bNnj9Hr3bt306JFC1q1akXp0qWJioqiWbNmabayZcsC8NxzzxEeHk5sbKxWzp49e7RWXmasrKxo1KiR1qEm1YYNGwBo27at9v9jx44ZBcbo6Gh2796tpRFCCCEel+OgaG9vz7Bhw1i9ejXbtm3D19cXpRTjxo3D1taWOXPmMHnyZKZMmUJoaChhYWEsX74cX19f7t+/D8C4ceNITk7G19eXbdu28eWXX/LKK69oQTMrs2bNIjIykpdeeolvv/2WDz/8kHHjxtG7d2/q168PwNChQ3FxcaFbt258/fXXbNmyhU6dOlGqVCnGjRuX09MWQghRAuT49qmjoyPvvfcekyZN4syZM9SrV49du3Zpz+8mTJhApUqV+OCDD1i6dClmZmbUqFGD559/Xnsu5+joSGhoKGPGjMHPz48aNWrwySefMGPGjGzVoUePHgQHBzNnzhx69uyJvb09w4cPZ/78+Voaa2trIiIiCAgIYPjw4SQlJdGmTRv27t2b7q3d/JA61/qdO3dynDcpKZcTgsfnckLwJJkQvEiRCcHzXjGYEDz1u0LWaSg8skpGPoqKiiqwACyEeHZcunSJypUrF3Y1SiQJivkoOTmZK1euYG1tnabDUFxcHM7Ozly6dCnHS8TkNq/kKxr5ilNdJV/BHlMpxZ07d3BycspwkhCRv3J3j05ki4mJSZZ/7ZUpUybX66blNq/kKxr5CuOYki9v8+XHMR8fvy0KnvwpIoQQQvxDgqIQQgjxDwmKhUSv1xMYGJirGXBym1fyFY18hXFMyZe3+QrrmCL/SUcbIYQQ4h/SUhRCCCH+IUFRCCGE+IcERSGEEOIfEhSFEEKIf0hQzAOnTp2iY8eOGAwGKlasyOTJk3n48GGW+apWrYpOp0uzPb5mY0RERLpp+vfvX2h1z6hOOp2OOnXqFEqdAG7dusWIESOoUqUKBoMBd3d3li9fnq2659X1LKr++usvRowYQaNGjShVqhTu7u7Zyrds2TKef/55ypUrh06nS7M6DeT/Nc1t3V966SVq1aqFwWDAzs6O9u3bExYWlid1epp6PS4oKAidTsfzzz9vtL+k/pwWBTKjzVOKjo7Gy8uLWrVqERISwuXLlwkICOD+/ft8/PHHWebv06cPEyZMMNqXXlftlStXGgUcBweHQqt7kyZNiIyMNNoXFxeHr68vvr6+hVInAD8/P06dOsW8efOoUqUKO3fu5I033sDU1JTXXnvNKG1+XM+i7Pjx4+zYsYMWLVqQnJxMcnJytvKtXr0agK5du2r/zkh+XdPc1v3hw4cEBARQq1Yt4uPjWbFiBV27diU8PJx27doVWr1SXbt2jdmzZ6e7uHqqkvZzWiQo8VTmzZunDAaDunXrlrbv008/Vaampury5cuZ5nVxcVGjRo3KNE14eLgC1OHDh/Okvo97mro/aeXKlQpQhw4dKpQ6Xb16VQFq5cqVRvvbt2+vvLy8tNf5eT2LsqSkJO3fQ4YMUfXq1ctRvn
PnzilABQcHp0mT39c0t3V/UmJionJ2dlavvfZakajXyy+/rAYPHqw8PDxUt27djN4rqT+nRYHcPn1KoaGh+Pj4YG9vr+3r27cvycnJeXqrJj/kZd3XrVtHrVq1aN68eaHU6dGjR0DaeSNtbGyKxDI8oaGh2i2wx5dI69WrFzqdDoPBwB9//JFvx8/t5NJFYVLqvKqDqakptra22boVnx1PU699+/axZcsWFixYkCd1EXmn8H/ii7lTp06leY5ma2uLo6Mjp06dyjL/2rVr0ev1WFlZ0bVrV44dO5Zuuq5du2JqakrlypWZNGkSDx48KPS6p/r777/Zs2cPAwcOLLQ6OTs706lTJ+bNm8eJEye4c+cOGzZsICwsjFGjRqVJnx/XMzO+vr4MHz4cgIULF3Ls2DE2bNjAli1bAHjvvfdwdXXN1zrkt4K+ptmhlCIxMZFbt26xaNEi/vzzT15//fVCrVNSUhJvvvkmM2bMwNHRMdO0RfGaPuvkmeJTio6OxtbWNs1+Ozs7bt++nWneHj160KJFC6pUqcLZs2eZO3cubdu25ciRI1SvXh1IaelMnjyZ9u3bY2lpyZ49e1i0aBEnT55k+/bthVb3x61fv56kpKQ8CYpPU6eQkBD69etHvXr1gJSWwdKlS+ndu7eWJj+vZ1YWL17M7t27OXv2LEOHDuXSpUsA+Pj4pBu4i4vCvKZZWbFihfY82crKivXr19OqVatCrdOyZcu4d+8e48ePzzBNUb6mzzoJioXoo48+0v7drl07OnXqRJ06dVi0aBHLli0DoHHjxjRu3FhL5+XlhaOjI2+++SaHDh3iueeeK/B6P2nt2rU0bdq0UFs6SimGDh3Kn3/+ybp163B0dOS7775j3Lhx2NnZab32CvN6WllZsXr1atq3b88vv/wCpHz5rVy5Ms16m8VJUf4ZfeGFF2jUqBE3b94kODiYvn37snnz5qfuEJZb169f5+2332b16tWYm5tnmK4oX9Nnndw+fUp2dnbExsam2R8dHW30XCw7HB0dadu2rfaFmZG+ffsCZJkuK3lR9zNnznDo0CEGDRr0VHV52jrt2LGD4OBgNm7cyIABA/D09GTu3LkMHjw4Te/eJ+XV9cyONm3a0KxZM+11jx49nskV1gvymmbGwcGBZs2a0aVLF1asWIGvry+TJk0qtPq8/fbbNGjQgHbt2hETE0NMTAyJiYkkJiZq/85IUbmmzzoJik+pTp06aZ51xcbGcvXq1TwZs5ef8qLu69atw8TEJM/GT+W2TidOnMDU1DTNWLHGjRtz5coV7t+/nyf1e1pffvklhw4d0l6vXbuW/fv3F2KNSpamTZvy119/FdrxT506xd69e7Gzs9O2n376iV27dmFnZ8fu3bsLrW4ihQTFp+Tr68vu3buJiYnR9gUHB2NiYkKnTp1yVNaVK1fYt29flj04v/76a4Cn7umZF3X/6quv8PT0zLLDQH7XycXFhaSkJH777Tej/b/88gvly5endOnSGebNq+uZlUuXLjF27FggpYXo5uZGcnIyQ4YMKTJBO68U1DXNqX379mnP6wtDUFAQ4eHhRlvDhg1p2bIl4eHhmd4WLarXNDO5neBAKcWCBQuoUqUKlpaWtGrVigMHDuRzbf89uHgKt2/fVo6OjsrDw0Pt2rVL/fe//1W2trZpxh96eXmpGjVqaK/XrVunBg4cqNasWaP27NmjvvjiC1WjRg1lZ2enzp49q6UbNGiQCgwMVFu3blW7du1SU6ZMUebm5uqFF14otLqn+r//+z8FqC+++OKp6/K0dYqLi1NVqlRRNWvWVP/73//U7t271eTJk5WJiYl65513tHT5eT0zk5ycrLy9vRWg7Ozs1NWrV1VkZKQyMTFRgBo5cmS+Hv/evXsqODhYBQcHK09PT+Xs7Ky9vn79ulIq/c/58OHDKjg4WC1btkwBasKECSo4OFhFRERoafL7muam7tu3b1d9+/ZVq1evVuHh4WrTpk2qd+/eCl
BfffVVodUrPemNUyysn9O8tmXLFlW5cmXVu3dvVb9+/WyP5Zw/f74yNzdXH3zwgdq9e7fq1auXsra2VmfOnMnnGislQTEPnDhxQnl7eytLS0tVvnx5NXHiRJWQkGCUxsPDQ7m4uGivIyMjlaenp3JwcFClSpVSDg4Oqm/fvurUqVNG+ebNm6fq1aunrKyslJmZmXJ1dVWzZs1KU35B1j3VxIkTlV6vV9HR0XlSl6et059//qn69u2rnJycVOnSpVW9evVUUFCQSkxM1NLk9/XMyNKlSxWgALVq1Spt/4QJExSgdDqdCgsLy7fjpw6+T28LDw9XSqV/TYcMGZJuHg8PDy1Nfl/T3NT95MmTqmfPnsrJyUmZm5srJycn1aVLF6NgXhj1Sk96QbGwfk7zWm4mOHjw4IEqU6aMmjZtmrYvISFBubi4qDfeeCNf6vk4WWRYCCFEvvP39+fnn3/m999/zzTdnj178Pb25siRIzRq1EjbHxAQQEhICOfPn8/XesqQDCGEKMHi4+OzPcuPUirN8CG9Xp/ufM25ldrR7smOdW5ubly8eJEHDx5gaWmZZ8d7kgRFIYQooeLj46lgaUcc8VknJmWs7d27d432BQYGMmvWrDyrU3R0NHq9HgsLC6P9dnZ2KKWIjo6WoCiEECLvPXz4kDjimW3SDQvMMk0bzyMC7+7g0qVLlClTRtufl63EokCCohBClHCldXosdZkHRZN/1o8oU6aMUVDMa3Z2diQkJBAfH2/UWoyOjkan02FnZ5dvxwYZp5gjS5YsoUqVKpiamvLCCy/kSZlBQUHs3LkzT8oSQhQMf3//XC0qXFSZ6kyytRWE1GeJp0+fNtp/6tQpbdxifpKgmE1//vknEyZMYNCgQfz444+8//77eVKuBEUhRGEz0emytRWE1q1bU6ZMGYKDg7V9jx49IiQkhK5du+b78eX2aTadPn0apRSvvfZaoc6IkZX87pklhCj6cvo9kBL0Mm8jmZDzoHj//n3tj/4LFy4QFxfHxo0bAfDw8KBcuXJ4e3tz4cIFbfo9CwsLpk2bxqxZsyhXrhz169dn2bJl3Lp1i4kTJ+a4DjklLcVs8Pf3p3v37gDUqFEDnU7HJ598wptvvknt2rUpXbo0VatWZcSIEWkms962bRvNmjXDysoKW1tbmjVrpv2QVK1alQsXLvDJJ59oC9CuWrVKy7tq1SoaNGiAhYUFlSpVYsaMGSQlJRm9r9PpiIyMpGPHjhgMhkKd7FiIZ8nx48fp2rUrZcuWpXTp0tSuXTvTO0THjh2jc+fOGAwGbGxs6NOnDxcvXtTeHzZsGO3atdNe37x5ExMTE6Np2+7evYuZmZlRK+nkyZP07NkTGxsbDAYD3bp148yZM0bH1ul0LFiwgClTplCxYkXKly+fo3MtZWKarS2nrl+/jp+fH35+fkRERHDp0iXt9fHjx4GU9SWfnAh9ypQpBAYGsmjRIrp27UpUVBS7du0qkAaJtBSz4a233qJu3bpMmTKFkJAQHB0dqVGjBm+//TZz586lXLlyXLp0iblz5/LCCy8QHh4OpKwg0adPHwYMGMD8+fNJTk7m119/JTo6GoDNmzfTtWtX2rZtq63kUKNGDQA++OADJk+ezPjx41m8eDEnT57UguKTq3UPHDiQ4cOHM3369Ezn+BRCZF/37t2pUKECK1aswMbGhr/++ouoqKh00166dIn27dtTo0YN1qxZQ3x8PDNmzMDDw4PffvsNa2tr2rdvz9q1a7UOJHv37kWv13PkyBHu3LmDtbU1+/fvJzExkfbt2wNw9uxZWrdujbu7O6tWrcLExIS5c+fi7e3N6dOnjXp+fvjhh7Rs2ZIVK1ZkutpGekx0JtloKea8DVW1alWymh8mIiIizT6dTse0adOYNm1ajo/51PJ9zpxnxObNmxWgzp07l+77jx49Uvv27VOAOn36tFJKqeDgYAWouLi4DMt1cXFJM69nXFycsrKyMprmSCml/vOf/yhLS0
t18+ZNpZRSK1euVIBasGDBU5yZEOJJN27cUIDatm1buu8/OWXZ+PHjlcFgULdu3dL2nTx5Uul0OvXRRx8ppZQ6e/asArSp5saOHasGDBigypYtq0JDQ5VSSs2YMUO5urpqZQwePFhVr15dPXjwQNt3/fp1ZWVlpT755BNtH6Dq1q2rkpOTc3SesbGxClCflX5ZrTEMy3T7rPTLClCxsbE5OkZxI7dPn8L//vc/GjdujJWVFWZmZrRt2xaAP/74A4AGDRpgamrKwIED+eabb9JdJzA9+/fv5+7du/j5+WlrrSUmJuLj48ODBw/STJPUrVu3vD0xIUq4smXL4uLiwrRp0/jyyy8zbCGm+vHHH/Hy8jJa87NOnTo0bNiQffv2AVCtWjUqV67M3r17Adi7dy+enp60a9eOH374QduX2koECAsLo0ePHpQqVUr7HrCzs6Nx48YcPnzYqA6+vr65Xqw6taWY1VYSlIyzzAebN29m8ODBPPfcc2zYsIEDBw6wefNmIGWWCABXV1e2b99ObGwsvXr1oly5cvTo0cPoOUN6bt68CUCTJk0wMzPTtlq1agEpt2oeV6FChbw+PSFKNJ1OR1hYGG5ubowaNQpnZ2eaNWumBbQnRUdHp/t7WKFCBW7fvq299vDwYO/evcTFxfHrr7/Svn172rdvz969e0lISODQoUNGQfHmzZsEBQUZfQ+YmZnx448/5un3QFHqfVrY5JliLgUHB9OoUSM+/fRTbV/qX3uP69KlC126dCEuLo5vv/2W8ePHM3ToUL7//vsMy079azMkJARnZ+c071erVs3odW7/OhRCZMzV1ZXg4GAePXrE/v37mT59Ot27d+fy5ctp0trb23P9+vU0+//++29cXV211+3btycgIICIiAgcHByoU6cO9+7dY8qUKYSHh5OQkGDUGcfe3p5u3boxcuTINGVbW1sbvX6a74H8eqZYHElQzKUHDx5gbm5utG/t2rUZpi9Tpgx9+/bl4MGDfPXVV9p+c3NzrWWZqlWrVpQuXZqoqCh69eqVtxUXQuSImZkZHh4eTJ06lR49enDlypU0adq2bctnn31GdHS0NuPK6dOn+e2333jllVe0dO3bt+fevXt88MEHWouwUaNGWFpasmDBApydnalataqW3sfHh99//53GjRtjaprz3p/ZVcrElFK6zMsvpfLv+EWJBMVc6tixI6NGjeKdd96hVatW7Ny5M03r79NPPyUyMpIuXbrg6OjIuXPnWLNmjdEK8m5ubuzZs4fvvvsOOzs7qlWrRtmyZZkzZw6TJ08mKioKT09PTE1NOXv2LFu3bmXTpk3Sy1SIfPTbb78xYcIE+vXrR40aNYiNjWX+/PlUrVpV6yH+uPHjx7Ny5Uo6derEjBkziI+PZ+bMmVSpUgV/f38tXZ06dShfvjw//PADH330EQCmpqa0adOG0NBQBg0aZFTu7Nmzad68OZ07d2b48OFUqFCBa9eu8cMPP9CuXTsGDBiQJ+ebnRlrTEtIS7FknGU+eP3115kwYQJLly7lxRdf5NKlS6xbt84oTYMGDbh58yYBAQF06tSJwMBABgwYwLJly7Q08+bNo3LlyvTu3ZvmzZvzzTffADBhwgRWrlxJeHg4vXv3xs/Pj88++4zmzZunaaEKIfJWxYoVqVixIvPnz8fX15fXX38dZ2dnwsLC0m2xOTs788MPP2BnZ8egQYMYPnw4DRs2JCIiIs1tztQW4uPPDj08PNLsA6hZsyaHDh2ibNmyjBw5ks6dOzN16lTu3btHgwYN8ux8UwfvZ76VjMc0ssiwKNK2bNnClStX0n2mklsxMTEEBQXRt29f6tatm6O82V0oVafTsXDhwgKZgSPVunXrCAwM5Pz589SrV4+jR4/mSbnnz59n1apVDB8+HCcnpzwpUxQNcXFx2NjYEGz3BqVNMl/t4n5yAn7R/yE2NjZfJwQvbNJSFEXali1bjFrWeSEmJobZs2dz4sSJPC33cZGRkWluhe
Wnu3fv8sorr9C2bVsiIiL43//+l2dlnz9/ntmzZ6f7LE08G3TZGI6hKyFDMuSZongmKKV4+PBhkVnbrWXLlgV6vPPnz5OQkMDLL79MmzZtCvTYOZWQkICZmRkmJiXjS7Y4yFbv0xISFEvGWYoiK7P5Jf39/fnyyy85fvy4NjdsaqeF1KV7du7cScOGDdHr9drz2MjISLy8vLQ5KAcOHKh1lz9//rw2pMXPz08r9/z580DKF/bMmTOpXr06er2eypUrG3WUSBUREUHjxo0xGAw899xz/PLLL0bv63Q6Fi1apL329PTk+eefZ+PGjdSuXRsrKyu8vLzSzGEZFRXF888/T+nSpXF2dmbJkiWMGzfOqEfik2bNmkX9+vUB8Pb2RqfTaSuhT506lfr162NlZUWlSpUYMGAAV69eTVPGjh07aNOmDaVLl8bOzg5PT0+OHDlCREQEHTp0AKB58+ba9Up14cIF+vTpo83L2blzZ44dO2ZUdtWqVXnzzTd5//33cXFxwdLS0mjsnih8Mk7xX9JSFIUqs/kl33rrLW7cuMGpU6e04S7lypXT8l65coUxY8ZovfyqVKlCZGQknp6edO3alfXr13Pv3j1mzpxJz549iYyMxNHRkZCQEF588UXmzZunfeE7OjoC0Lt3b/bs2cP06dNp2bIlN27cICQkxKjO165dY8yYMUydOhUbGxumTZtGr169OHPmDGZmGS/UevToURYuXMiCBQtISkoiICCAl156icjISCCltduzZ0/+/vtvPv30U2xsbFi4cCEXLlzItFX16quvUqNGDQYPHswnn3xCkyZNqFy5MpAyIfP06dNxcnLixo0bLF68GA8PD06cOEGpUim//uvXr2fAgAH07NmTdevWYW5uzk8//cTly5dp3749n3zyCaNGjWLlypXaWncAd+7cwdPTExMTE5YvX46FhQVz586lffv2/Pbbb0ZjbDdt2kStWrX48MMPMTU1xWAwZPGTIQqSmUkpzEwyDwdmJGX6/jOjcGeZEyVZVvNLKpV2jsnH9wPqwIEDRvvbt2+vWrdubTQH5PHjx5VOp1M7duxQSil17tw5Bajg4GCjvGFhYQpQ69aty7Q+Op1O/f7779q+8PBwBagff/xR2weohQsXaq89PDyUwWBQ169f1/alzl176dIlpZRSO3bsUIDau3evlubOnTvKxsZGubi4ZFgnpZQ6cuSIAlR4eHiGaRITE1VUVJQC1K5du5RSSiUnJ6vKlSurzp07Z5gv9fwOHz5stP/DDz9UOp1OnThxQtt369YtZTAYVEBAgLbPxcVFlS1bVt29ezfTcxAFL3Xu09AKE9Rex+mZbqEVJsjcp0Lkp5zOL5le/hYtWmiv79+/z08//YSfn5+2HE1iYiKurq44OzunmSvySd9//z2lS5emf//+maZzcnKiXr162uvUHqxZ1b9Ro0ZGLd0n8x0+fBhbW1ujGU2srKzw9vbOtNzMhIaG0rp1a2xsbChVqpTWgkydn/f06dNERUUZDTDPrh9//BF3d3fc3Ny0ffb29nTs2FGb7zOVp6entA6LMJn79F8l4yxFkZTT+SWf9ORcj9HR0SQlJTF+/Pg0c0VevHgxzVyRT7p16xaOjo5ZTpdla2tr9Dp13OiTMxPlNN/Vq1eNgmaqnK6Nl+rw4cP06NEDJycn/ve//xEZGcmBAweMjnnr1i2AXA21yO58n6n7RNGlMzXJ1lYSyDNFUagym1/Sysoq07xPBi9bW1t0Oh3Tp0/nhRdeSJPewcEh0/LKli3L1atXUUoVynyyjo6O3LhxI83+9ObUzI7NmzdjY2PDhg0btGeSFy5cMEpTtmxZgFwNt7C3t+f06dNp9v/9999Gq0WAzM9b1OlMdeiy6A1cUj7DkhH6RZH3+PyScXFx2pd0enPDZsRgMNCqVStOnjxJs2bN0mypPTgzatn5+Phw//59NmzYkHcnlgPNmzcnJibGqKV89+7dTCePz8yDBw8wMzMz+jJ7cn
7e2rVrU7lyZVauXJlhORldr7Zt23Ls2DGjwBgdHc3u3bu1ZdRE8aArZZKtrSSQlqIoNNmZX9LNzY3//ve/fPXVV9SqVQsHB4dMhycsXLgQLy8v+vXrR//+/bGzsyMqKorvvvuOoUOH4unpScWKFbG1teWrr76iWrVq6PV6GjRogI+PD127duWVV17hzJkztGjRgtu3b7Nx40bWr1+f79fD19eXJk2aMHDgQObPn4+trS3vv/8+1tbWuRrT17FjR4KCghg9ejS9evUiMjIyzaD+1KEjAwYMoHfv3gwePBi9Xk9kZCTNmzfn+eefx9XVFVNTU/773/9SqlQpSpUqRbNmzRg6dChLliyhW7duvPvuu1rv01KlSjFu3Lg8uiqiIOjMS2Fimnk40CWVjHBRMkK/KJKyM7/ksGHD8PPzY/To0TRv3lwbf5eR1q1bs2/fPu7evcvQoUPp2rUrc+bMoXTp0tSsWRMAExMTVq5cyblz5/D29qZ58+Zay3TTpk2MGTOGTz/9FF9fXwICArK8jZtXdDodW7dupWHDhgwfPpzXX3+dbt264ePjg42NTY7L69q1K++99x5bt26lR48e7N27l+3bt6dJ169fP7Zu3crly5fp378/AwYMYN++fVqnHAcHBz755BNtEurmzZsDKUsXRUREaPUdNGgQdnZ27N27N90lz0TRJS3Ff8ncp0IUYQ8fPqRu3bq0a9cu01ucQuRG6tyn4Q1nY2VqkWnau0nxdPg18Jmf+7RktIeFKCY+++wzkpOTqV27NtHR0fznP//h/PnzfP3114VdNfEM05XKunepzH0qhChwFhYWLFiwQJt2rmHDhuzYsYNmzZoVbsXEM01nmvXtUV0JedomQVGIImTw4MEMHjy4sKshSpjsjEPUlZC5XiQoCiFECWeiN8WkVObhwCSdxZWfRRIUhRCihMtO71K5fSqEEKJEyNYzRbl9KoQQoiTI1jPFZAmKQgghSoBs3T6VlqIQQoiSQILivyQoCiFECWdiXgoT8yx6n+pKRrgoGWcphBAiQzpTHTrTzJeGyur9Z4UERSGEKOF0pXRZ3z5NlqAohBCiBMjWM0XpfSqEEKJEyMaQDLJ6/xkhQVEIIUq4bA3eT5KgKIQQogTI1uB9aSkKIYQoCbI1JEOVjHBRMs5SCCFExkx1KVtWaUoACYpCCFHC6UxM0Jlkcfs0i/efFRIUhRCihNOZZOOZogRFIYQQJYHORJeNlqLcPhVCCFESyDNFTcloDwshhMiQiVmpbG05derUKTp27IjBYKBixYpMnjyZhw8fZpmvatWq6HS6NFt8fHxuTi9HJCiKZ1rVqlV58803tddbtmxh2bJlOS7n/Pnz6HQ6Nm7cmJfVA8Df3x93d/cs0+l0OhYtWmS0b/LkyTg6OmJiYsK4ceMyPL/sHkOUTKkdbbLaciI6OhovLy8ePnxISEgI8+bN47PPPiMgICBb+fv06UNkZKTRptfrc3N6OSK3T8UzbfPmzdjZ2Wmvt2zZws8//8zIkSMLsVa5ExkZiYuLi/Z69+7dLFy4kCVLltCiRQucnJwIDAxM9/zeeust7t27V9BVFsVFPtw+Xb58OXFxcWzevBl7e3sAEhMTGTlyJNOnT8fJySnT/BUqVKBly5Y5OmZekKAonmmNGzcu7CrkmSe/IE6dOgXAmDFjMMnir/gaNWrkW71E8afTZWNIhi5nLcXQ0FB8fHy0gAjQt29fRowYQVhYGP7+/rmpar6T26eiWEq9HRgaGoq7uzsWFhY0bdqUAwcOGKV7/Papv78/X375JcePH9eeUTz+ixkZGUmnTp0oU6YM1tbWtGjRgu+++86ovPj4eN58803s7OxwdHRk4sSJJCYmZlrX48eP07VrV8qWLUvp0qWpXbs277//fpp0ERERNG7cGIPBwHPPPccvv/xi9P7jt089PT0ZPXo0AKampuh0OqpWrZrh+T15+3TVqlXodDqOHDmCr68vBoOBWrVqsXr1aqNjKq
WYM2cOFStWxMrKCj8/P3bv3o1OpyMiIiLT8xbFR+o0b1ltAHFxcUZbQkJCumWeOnWKOnXqGO2ztbXF0dFR+4MuM2vXrkWv12NlZUXXrl05duzY059oNkhQFMXW1atXGTlyJJMmTWLDhg3o9Xo6d+7M9evX003/1ltv0bVrV6pXr649o3jrrbcA+Omnn/D09CQhIYEvvviCTZs20bNnTy5evGhUxowZMzAxMWHDhg2MGDGCxYsX88UXX2Raz+7duxMdHc2KFSvYsWMHEydOTHMr89q1a4wZM0Y7l/j4eHr16sWjR4/SLXPZsmWMGzcOQDuXrVu3Znh+GRk0aBCdOnViy5YtNG7cGH9/f06ePKm9v3TpUmbNmoW/vz8hISHUqFGDV199NdMyRfGTMiQj6w3A2dkZGxsbbZs/f366ZUZHR2Nra5tmv52dHbdv3860Pj169ODjjz9m9+7dfPLJJ/z111+0bduWs2fPPvW5ZkVun4pi6/bt2wQHB+Pl5QWAh4cHzs7OLFmyJN1f1Bo1alCuXDkuXLiQ5lbk5MmTqVmzJnv27MHU1BSATp06pSmjRYsWfPTRRwB07NiR8PBwNm7cyIgRI9Kt482bNzl37hwffvgh3bt3B6BDhw7pnssPP/xAvXr1ADAYDHTo0IGDBw/Stm3bNOnr1q2rPV98/FwyOr+MvPnmm9rzx9atW7Njxw42bdrEzJkzSUpKYsGCBQwdOpQFCxZo1+TmzZusWLEiW+WL4kFnVgpdFr1LdYkp71+6dIkyZcpo+/Oj80vq7xhAu3bt6NSpE3Xq1GHRokW56iiXE9JSFMWWjY2NFhBTX/v4+HDw4MEclXP//n0OHDjAkCFDtICYkScDZd26dYmKisowfdmyZXFxcWHatGl8+eWXGaZ1cnLSAmJquUCmZeeFx8/HYDDg4uKiHTMqKoqrV6/So0cPozw9e/bM1zqJgqcz1WXj9mlKS7FMmTJGW0ZB0c7OjtjY2DT7o6OjjZ4zZoejoyNt27ZN80ghP0hQFMVWuXLl0uyrUKECV69ezVE50dHRJCcnZ9kbDkhzO8jc3DzTsVM6nY6wsDDc3NwYNWoUzs7ONGvWjL1792ZZLpDv47IyO5/U6/jkdS5fvny+1kkUvJzcPs2uOnXqpHl2GBsby9WrV9M8ayxKJCiKYuvGjRtp9v399984OjrmqBxbW1tMTEy4cuVKXlXNiKurK8HBwURHRxMREYFer6d79+7cvXs3X46XV1Kv45PXOaNntqIYMzHJ3pYDvr6+7N69m5iYGG1fcHAwJiYm6T6ayMyVK1fYt28fzZs3z1G+3JCgKIqt2NhY9uzZY/R69+7dtGjRIsM86bXsDAYDrVq1YvXq1SQlJeVbfc3MzPDw8GDq1KnExcXlSxDOquWaE5UrV6ZixYps3brVaP+WLVvypHxRdKROCJ7plsOgOGLECKytrXnhhRcICwtj5cqVTJo0iREjRhjdlfH29qZmzZra66+++opBgwaxdu1awsPDWbFiBe3bt8fU1JQJEybk2TlnRDraiGLL3t6eYcOGMXv2bGxtbVmwYAFKKa1XZnrc3Nz473//y1dffUWtWrVwcHCgatWqLFiwAC8vL3x8fBg5ciR2dnb83//9Hw4ODrzyyiu5ruNvv/3GhAkT6NevHzVq1CA2Npb58+dTtWrVfBk7mNH55YapqSnTpk1j3LhxVKhQgQ4dOhAeHs7u3bsBshwbKYqR7MxYk8PP287Oju+//57Ro0fzwgsvYG1tzauvvsrcuXON0iUlJRkNa6pWrRpXrlxh3LhxxMTEYGtri5eXF3PmzKFatWo5qkNuSFAUxZajoyPvvfcekyZN4syZM9SrV49du3ZRoUKFDPMMGzaMQ4cOMXr0aG7dusWQIUNYtWoVbdu2JSIigpkzZ+Lv74+pqSn16tXj3Xfffao6VqxYkYoVKzJ//nwuX76MjY0N7dq1Y82aNVl26smNjM4vt0aPHk10dDTLli3jo48+ws
fHh4ULF9KvXz9sbGzyruKiUOnMTNGZZf7zmNX76XFzc9P+iMrIk+NdW7ZsSXh4eI6PlVd0SilVaEcXIpf8/f35+eef+f333wu7KiXOW2+9xeLFi7l16xaWlpaFXR3xFOLi4rCxseHCukOUKW2Vedr7d3EZ+ByxsbFGQzKeNdJSFEJk6OTJk6xZs4bWrVtjbm5OREQEixYt4o033pCA+AzJTu9SWU9RCFHilS5dmsjISP7zn/9w584dKlWqxKRJk5g1a1ZhV03kocenccssTUkgt0+FEKKESr19emnj/1HGkMXt03t3ce7TRG6fCiGEeLbpdCkTTWSVpiSQoCiEECWdiS5lyypNCSBBMR8lJydz5coVrK2ts/wrTAghlFLcuXMHJyenAh0HKs8U/yVBMR9duXIFZ2fnwq6GEKKYuXTpEpUrVy6w40nv039JUMxH1tbWAJw7elL7d3Ykm+f8L7JStx/mKP2jChY5PsazpKB6l5WMr5G8ZXYj/UVrM5WbRszD5Bwlv/DpDzk+hPNEnxylv3PnDtUau+Xo+yJPmJD1NSwZDcWnC4oygDpzqbdMra2tKWOd/d5ayfpcBMVHOQyKZSQoFgQJijlnFl80g6K1vnSOD5GT3/vHFfTjFp1Ol42ONiXjp/mpguJbb72VZgVxIYQQxYx0tNE8VVDMjwmN0/PgwQOZPUMIIfKLBEXNU90l9vf3x93dHYBVq1ah0+k4cuQIvr6+GAwGatWqxerVq9Pk27FjB23atKF06dLY2dnh6enJkSNHgJTJYXU6HTt27KBPnz6UKVMGPz8/AGJiYhg5ciSOjo7o9XqaNm1KWFhYmrI7duxI+fLlKVOmDC1atODbb781ShMTE8Nrr71GpUqVsLCwwNnZmf79+xuliYqK4qWXXsLBwQFLS0vat29fIKs+CyFEQdOZ6NCZZrFJUMydQYMG0alTJ7Zs2ULjxo3x9/fn5MmT2vvr16+ne/fulC9fnnXr1rF27VratGnD5cuXjcoZPnw4NWrUYPPmzUycOJGHDx/SsWNHtm/fzty5c9m2bRt169alW7duHDt2TMt37tw5unfvzv/+9z82bdpEmzZt6Nq1q9FM7AEBAWzfvp158+axa9cuFi5ciF6v196Pjo6mbdu2HD16lKVLl7Jp0yYMBgNeXl6ZLrCakJBAXFyc0SaEEEVdau/TrLaSIM97n7755puMHDkSgNatW7Njxw42bdrEzJkzUUoxceJEOnXqxObNm7U8Xbt2TVNOjx49eO+997TXK1eu5OjRo/z666/UrVsXgM6dO/Pnn3/yzjvvsGHDBu34qZKTk+nQoQPHjx/ns88+w9PTE4BDhw4xcOBAhgwZoqV9vKUYFBRETEwMhw4donz58kDKQpiurq4sWrSI999/P91znz9/PrNnz87R9RJCiEKXMqVN1mlKgDxvKXbq1En7t8FgwMXFhaioKABOnz5NVFRUthZt7datm9HrsLAw6tevj6urK4mJidrWsWNHDh8+rKWLiopiyJAhVKpUiVKlSmFmZkZYWBh//PGHlqZJkyasWrWKRYsWpdtzNiwsjA4dOmBvb68dx9TUFA8PD6NjPWnatGnExsZq26VLl7I8TyGEKGzSUvxXnrcUbW1tjV6bm5sTHx8PwK1btwBwcnLKspwnF4q9efMmR44cwczMLE3a1MVak5OT6dGjB7GxscyZM4eaNWtiMBh4++23uXjxopZ+6dKl2Nvbs3jxYiZNmoSzszPTpk3jjTfe0I514MCBdI+VWecivV5vdBtWCCGKBelooynQwftly5YFUmZ6ycqTY2Ls7e1p0KABK1asyDDPX3/9xZEjR9iyZQs9e/bU9j948MAonY2NDUFBQQQFBXHs2DE+/PBDRo4cibu7O+3atcPe3p4uXbrwzjvvpDmGBD0hxLNGxin+q0CDYu3atalcuTIrV66kb9++Ocrr4+PDzp07cXJyyrClmRr8zM3NtX0XLlzgp59+wtXVNd089evXZ8
mSJaxYsYKTJ0/Srl07fHx8WLNmDW5ubhgMhhzVUwghipvUHqZZpSkJCjQo6nQ6Fi1axIABA+jduzeDBw9Gr9cTGRlJ8+bNef755zPMO3jwYD799FM8PT2ZOHEirq6uxMTEcOTIER4+fMj8+fOpU6cOlStXZurUqSQlJXH37l0CAwOpVKmSUVlt2rShV69euLu7Y2pqyurVqzE3N6ddu3ZASu/UtWvX4uHhwdixY6lSpQo3btzg4MGDODk5MX78+Hy9TkIIUaDk9qmmwOc+7devH6VLl2bu3Ln0798fCwsLmjRpQq9evTLNp9fr2bNnD7NmzWLu3LlcvXoVBwcHGjdurPV21ev1hISEMGrUKPz8/HB2dmbmzJns2bOHn3/+WSurTZs2rF69mnPnzmFiYkL9+vX55ptvcHNzA1Ju8x44cICZM2cyZcoUbt26Rfny5WnZsmWW9RRCiOJGJgT/l04pVVDTQJY4qata/33tWo5WqpYPJP/l5tc7N59LQR2npCuq1zlh94UcpY+7fxeX/s0LbHX71O+oW7+co4xV5pOQx929Q9mm1QqsboVFVskQQogSTqfLuiVYQvrZFN/FQB6fYi6/xcTEMGvWLE6cOFEgxxNCiAJlks2tBCghp/l0YmJimD17tgRFIcQzSWdqkq2tJJDbp0IIUdLpyPrBrNw+LR4iIiJo3LgxBoOB5557zmglC6UUixYtwtXVFb1eT/Xq1VmyZIlR/lOnTtG/f3+cnZ0pXbo0devWZfHixSQnpyxAev78eapVqwaAn5+fNsj1/PnzBXaOQgiRr1LnPs1qKwGKdUvx2rVrjBkzhqlTp2JjY8O0adPo1asXZ86cwczMjLFjx/LFF18wY8YMWrRowf79+5kyZQqWlpaMGDECgMuXL1O7dm0GDRqEtbU1R48eJTAwUBvj6OjoSEhICC+++CLz5s2jQ4cOADg6OqapT0JCAgkJ/64aLqtkCCGKA6VL2bJKUxIU66B4+/ZtfvjhB+rVqwekTEDeoUMHDh48iKOjIx9//DHLly9n+PDhQMqsOPfv32f27NkMHz4cExMTvL298fb2BlJalm3btuX+/ft8/PHHBAYGotfrady4MQC1atWiZcuWGdZHVskQQhRLcvtUU6xvnzo5OWkBEdCWlIqKimL37t0A9O7d22hVDR8fH65du6atYBEfH09gYCA1a9ZEr9djZmbGjBkzuHr1Knfv3s1RfWSVDCFEsSS3TzXFuqWY3oockBLobt68iVIKBweHdPNeunQJFxcXpkyZwueff05gYCBNmzbF1taWrVu38u677xIfH4+VlVW26yOrZAghRPFWrINiZuzt7dHpdOzbt89ogvBUtWvXBiA4OJjXX3+dKVOmaO/t2LGjwOophBCFTZnoUFkM3s/q/WfFMxsUU58T3rp1i+7du2eY7sGDB0ZBMykpia+//toozeMtUCGEeObIM0XNMxsUXV1dGTVqFC+//DKTJk2iRYsWPHr0iD/++IPw8HC2bNkCQMeOHfn888+pW7cuDg4OLFu2zKgHKUDFihWxtbXlq6++olq1auj1eho0aJBuC1QIIYodCYqaYt3RJisfffQR7777Ll9//TXdunXjpZdeYv369Xh4eGhpli5dioeHB6NHj2bYsGHUr1+f6dOnG5VjYmLCypUrOXfuHN7e3jRv3jxbCyULIURxkDokI6utJJBVMvJR6gz0e1vNx6qURbbzVRraOsfHKtOvfo7SmzxMzvExkvW5+BsqKRc/XgWxmGlBLXlRQAril7igVqIwScx5rtiNv+c4z9EpG3KU/vTtizk+RmJyUo7Sx6tHTOebAl8l4+bZy5Sxzvx4cXficKheSVbJEEII8YyT26eaHP3pn98rUzRq1Ah/f/98K18IIURaSqfTeqBmuJWQcYrP9DNFIYQQIieKVVBUSqXpGSqEEOIp6bK5lQC5CoqhoaG4u7tjYWFB06ZNOXDggNH7q1atokGDBlhYWFCpUiVmzJhBUp
LxA+f9+/fTtGlTLCwscHd3JzQ0NM1xUm/X7ty5k4YNG6LX6/nmm28ACAkJoVGjRlhYWODk5ERAQECacYQXLlygT58+2NjYYDAY6Ny5M8eOHTNKU7VqVd58802CgoJwdnbG2toaf39/EhISOHr0KG3atNFW4HgyrxBCPBNkmjdNjjvaXL16lZEjRzJr1izs7OxYsGABnTt35s8//6R8+fJ88MEHTJ48mfHjx7N48WJOnjypBcUFCxYAKatbdO7cmfr167Nhwwaio6N54403uHfvHo0aNTI63pUrVxgzZgwzZ86kSpUqVKlShW3bttGnTx/69+/PggULOHXqFNOnT+fixYts3LgRgDt37uDp6YmJiQnLly/HwsKCuXPn0r59e3777TecnZ21Y2zduhV3d3c+/fRTzp49S0BAAObm5kRGRhIQEECFChWYMmUKfn5+nDhxAhOT9P+WkFUyhBDFknS00eQ4KN6+fZvg4GC8vLwA8PDwwNnZmSVLljB9+nQCAwOZPHky8+bNA1IGx5ubmxMQEMCkSZMoW7YsQUFB6HQ6QkNDsbGxAcDZ2VmbheZx0dHRhIaG0qJFC21f3759admyJevWrQOgS5culC5dmtdff51jx45Rv359Vq5cyYULFzh+/Dhubm5aXatUqUJQUBCLFy82Os7WrVu1wfgRERF8/vnnhIaG0qVLFwCSk5Pp3r07x44do2HDhuleG1klQwhRHOXX0lGnTp1i9OjR7N+/H2trawYPHsy7776b5cQnSinee+89li1bxo0bN2jUqBFLlizJdJWivJLj26c2NjZaQEx97ePjw8GDB9m/fz93797Fz88vzcoUDx484PffU8YSHTx4kA4dOmgBEcDLywt7e/s0xytbtqxRQLx79y5Hjx6lT58+Run69esHwL59+wD48ccfcXd31wIipMyH2rFjRy1NKg8PD6MPydXVFRMTE6PzdHV1Bch05QtZJUMIUSyZ6LK35UB0dDReXl48fPiQkJAQ5s2bx2effUZAQECWed977z0CAwMZP34827dvx9HRkU6dOnH27NncnmG25bilWK5cuTT7KlSowMmTJ7l58yYATZo0STdvapC4evUqNWvWTPN++fLl0y37cTExMSil0uy3sbFBr9dz+/ZtIOUDeTJNanmpwTlVeqttWFpaGgXK7Mx/KqtkCCGKpXy4fbp8+XLi4uLYvHmz1uBJTExk5MiRTJ8+HScnp3TzxcfHM3/+fCZMmMD48eMBaNeuHa6urixatIhly5blrCI5lOOgeOPGjTT7/v77bxwdHbUTDwkJMXpml6patWpAyqr1169fT/N+evt0TzzctbW1RafTpUkbGxtLQkKCVgd7e3tOnz6dbl3Ta5EKIURJpXRZj0PM6TjF0NBQfHx8jL5v+/bty4gRIwgLC8twTPr+/fuJi4ujb9++2j5zc3NefPFFQkJCclSH3Mjx7dPY2Fj27Nlj9Hr37t20aNGCVq1aUbp0aaKiomjWrFmarWzZsgA899xzhIeHExsbq5WzZ88erZWXGSsrKxo1aqR1qEm1YUPKlE1t27bV/n/s2DGjwBgdHc3u3bu1NEIIIcjRkIy4uDijLaNhcqdOnaJOnTpG+2xtbXF0dOTUqVMZViX1vSfzurm5cfHiRR48eJDz88uBHAdFe3t7hg0bxurVq9m2bRu+vr4opRg3bhy2trbMmTOHyZMnM2XKFEJDQwkLC2P58uX4+vpy//59AMaNG0dycjK+vr5s27aNL7/8kldeeUULmlmZNWsWkZGRvPTSS3z77bd8+OGHjBs3jt69e1O/fsocoEOHDsXFxYVu3brx9ddfs2XLFjp16kSpUqUYN25cTk9bCCGeYSqbW0qnSBsbG22bP39+uiVGR0eneTQFYGdnl2kDKDo6Gr1ej4WF8XzRdnZ2KKWIjo7O8dnlRI5vnzo6OvLee+8xadIkzpw5Q7169di1a5f2/G7ChAlUqlSJDz74gKVLl2JmZkaNGjV4/v
nntedyjo6OhIaGMmbMGPz8/KhRowaffPIJM2bMyFYdevToQXBwMHPmzKFnz57Y29szfPhwow/H2tqaiIgIAgICGD58OElJSbRp04a9e/eme2s3P6TOtX4vMWfrMN55cDfnB8vh8A+ZEDwXeYpwl/SSPiF4bn5n7iXnbCKQB+pRjo+RpHI4ITgpxyjodRrUP/9llQZS+oY8PiH4M9ePQol8c+nSpez++SWbbLLJpm2XLl0qkO+o2NhYBairf19W9x7cyXS7+vdlBajY2NhslV2uXDk1derUNPudnJzUlClTMsz3ySefKEA9ePDAaP9nn32mdDqdun//fs5OModklYx85OTkxKVLl7C2tk7TYSguLg5nZ+c0f3VlJqd5CuIYBZWnqNaroPIU1XrlJk9RrVdB5cksvVKKO3fuZNgzM78oklBk3qrN6v0n1alTJ82zw9jYWK5evZrmeeGT+QBOnz5tNCb81KlTVKlSBUtLyxzVI6ckKOYjExMTKleunGmaMmXK5HhtspzmKYhjFFSeolqvgspTVOuVmzxFtV4FlSej9I+P3y4oObl9ml2+vr7MmzePmJgY7dlicHAwJiYmdOrUKcN8rVu3pkyZMgQHB2tB8dGjR4SEhNC1a9cc1SE3itWE4EIIIfKBSs7elgMjRozA2tqaF154gbCwMFauXMmkSZMYMWKEUUvY29vbaNy6hYUF06ZNY9GiRXz44Yfs2bOHAQMGcOvWLSZOnJhnp5wRaSkKIUQJl/IwM6uWYs7Y2dnx/fffM3r0aF544QWsra159dVXmTt3rlG6pKQkEhMTjfZNmTIFpRSLFi3SpnnbtWsX1atXz2Etck6CYiHR6/UEBgbmqOdWTvMUxDEKKk9RrVdB5Smq9cpNnqJar4LKk5tj5DdFMorMW4JZvZ8eNzc3du/enWmaiIiINPt0Oh3Tpk1j2rRpOT7m09IpVcB9f4UQQhQJcXFx2NjYcOnan5QpY51F2js4V6xFbGxsjp+3FifSUhRCiBIumSSSs+hdmtX7zwoJikIIUeKlDpHMKs2zT4KiEEKUcPn1TLE4kiEZ+ahq1arodLo0W2bLT6U6deoUHTt2xGAwULFiRSZPnszDhw8LoNbpe+mll6hVqxYGgwE7Ozvat29PWFhYtvJeuXKF3r17Y21tjb29Pa+++ipxOZyWLi8tW7aM559/nnLlyqHT6dJMLp+ZovK5/PXXX4wYMYJGjRpRqlQp3N3ds5VPKcWCBQu0QdCtWrXiwIED+VzbjO3cuRMPDw/KlSuHXq+nevXqBAQEGC0WkJEVK1bg6uqKhYUFDRs2ZPv27QVQ44ytWrUq3d/3qVOnZpqvKHwmKpv/lQTSUsxnffr0YcKECUb7sup1lro4Z61atQgJCeHy5csEBARw//59Pv744/ysboYePnxIQEAAtWrVIj4+nhUrVtC1a1fCw8Np165dhvkePXpE586dAVi3bh33799n4sSJDBw4sNC+xFavXg1A165dtX9nR1H6XI4fP86OHTto0aIFycnJJCdn76/41MVbFyxYQIMGDfjkk0/o1KkTR48eLZDu7k+6ffs2LVq0YMyYMZQtW5bff/+dWbNm8fvvv2f6R9fXX3/Na6+9xowZM/Dy8mL9+vX06tWLH3/8sUBWZ8/Mt99+azQAv1KlSpmmLxqfSfI/W1ZpSoB8nUSuhHNxcVGjRo3Kcb558+Ypg8Ggbt26pe379NNPlampqbp8+XJeVjHXEhMTlbOzs3rttdcyTbdu3Tql0+nUqVOntH27du1SgDp48GB+VzNdSUlJSimlzp07pwAVHBycrXxF6XNJPQellBoyZIiqV69elnkePHigypQpo6ZNm6btS0hIUC4uLuqNN97Il3rmxmeffaaATK+pq6urGjBggNG+Vq1aKV9f3/yuXoZWrlypAHXjxo1s5ynszyR17tNz135VN++fzXQ7d+3XHM19WlzJ7dMiKKPFOZOTk7N9yzK/mZqaYmtrm+Wtw9DQUB
o0aEDt2rW1fR07dsTe3p6dO3fmdzXTZWKSux/7p/lcQkNDtdtpj68G06tXL3Q6HQaDgT/++CPbdcnNOWS2eGthfRbpSV1CLqOfrbNnz/LHH38YnQdA//79+f777zNc368oKiqfSercp1ltJYEExXy2du1a9Ho9VlZWdO3alWPHjmWZJ7eLc+Y3pRSJiYncunWLRYsW8eeff/L6669nmie9c9HpdOlOFlzUPc3n4uvry/DhwwFYuHAhx44dY8OGDWzZsgVIuYXm6uqaL/VOVdiLt2YmKSmJ+Ph4/u///o85c+bQo0cPqlatmm7azM7j4cOHnDt3Lr+rm6l69ephampK9erVmT9/PklJGQeTovKZKKVQKjmLTZ4piqfUo0cPWrRoQZUqVTh79ixz586lbdu2HDlyJNNnBbldnDO/rVixgtdeew0AKysr1q9fT6tWrTLNU1TPJTee9lwWL17M7t27OXv2LEOHDuXSpUsA+Pj4MGrUqLyubhrZWbw1v1cgyIiLiwuXL18GoEuXLqxbty7DtKmLzD75WdjZ2QEU2s+Vo6Mjs2fPpkWLFuh0OrZt28bMmTO5fPlyhs+ci8pnkh8TghdXEhTzgFLK6K9BnU6HqakpH330kbavXbt2dOrUiTp16rBo0SKWLVtWGFXNUkbnAvDCCy/QqFEjbt68SXBwMH379mXz5s34+voWVnUzldm5FAYrKytWr15N+/bt+eWXX4CUFRFWrlyZZmmxkmbnzp3cu3eP48eP8+6779K9e3e+++67Qv28cqpz585apzKATp06YWlpyZIlS5gxYwaOjo6FWLusSEebVHL7NA/88MMPmJmZaZu3t3e66RwdHWnbtq32hZgROzu7dLukR0dHGz3Pyg+ZnYuDgwPNmjWjS5curFixAl9fXyZNmpRpeUX1XHIjL86lTZs2NGvWTHvdo0ePLJcXyyt2dnYkJCSkGRIUHR2NTqfTWlqFoUGDBrRq1YpXX32VrVu3Eh4ezubNm9NNm1rPJz+L1BZkfv9c5UTfvn1JSkri6NGj6b5fVD4TGZLxL2kp5oGmTZty+PBh7bW1deZzCGYlt4tz5oWcnEvTpk0JDQ3NtLw6deqkeY6qlOL06dN07Njx6SqbhaL4uXz55ZccOnRIe7127VpGjBhB69atn6pu2VHYi7dmV4MGDTAzM+Ovv/5K9/3U8zh16pRRB65Tp05hbm5eKENLcquofCYyeP9f0lLMA9bW1jRr1kzbHv9FfdyVK1fYt28fzZs3z7Q8X19fdu/eTUxMjLYvO4tz5oXsngvAvn37svwC8vX15ddff+XPP//U9n3//ffcunUr3xcMzcm5ZMfTfi6XLl1i7NixQEoL0c3NjeTkZIYMGcL9+/efqm7Z8fjirakKcvHW7Dp48CCPHj3K8GerevXquLq6Gp0HwPr16/H29sbc3LwgqpktX3/9NaampjRu3Djd94vKZyK9Tx9TaINBnnHr1q1TAwcOVGvWrFF79uxRX3zxhapRo4ays7NTZ8+e1dJFREQoU1NT9eWXX2r7bt++rRwdHZWHh4fatWuX+u9//6tsbW1zNeYxL2zfvl317dtXrV69WoWHh6tNmzap3r17K0B99dVXWrrz588rU1NTNXv2bG3fw4cPlbu7u6pfv7765ptv1Pr165Wzs7Pq1q1bYZyKUkqpw4cPq+DgYLVs2TIFqAkTJqjg4GAVERGhpcnrzyU5OVl5e3srQNnZ2amrV6+qyMhIZWJiogA1cuTIHJ3DvXv3VHBwsAoODlaenp7K2dlZe339+nWllFJeXl6qRo0aRvnmz5+v9Hq9CgoKUt9//73q3bu3sra2VmfOnMnR8fNKr1691Ny5c9U333yjdu/erRYvXqwqVqyoGjRooBISEpRSSr3yyivK1NTUKF/q+Ne3335bhYeHqxEjRqhSpUqp/fv3F8ZpKKWU6tSpk1qwYIHasWOH2rFjh3r99deVTqdT48aN09IUtc8kdZziySs/qKi7v2
S6nbzyQ4kYpyhBMZ9ERkYqT09P5eDgoEqVKqUcHBxU3759jQaxK6VUeHi4AtTKlSuN9p84cUJ5e3srS0tLVb58eTVx4kTtS6KgnTx5UvXs2VM5OTkpc3Nz5eTkpLp06WIURJT6dzB8YGCg0f6oqCj14osvKisrK2Vra6teeeWVQv3FGjJkSOrsx0abh4eHliavP5elS5dqx1m1apW2f8KECQpQOp1OhYWFZfscUq91elt4eLhSSikPDw/l4uJilC85OVnNmzdPVa5cWen1etWiRYtCDSTz589XjRo1UtbW1spgMKh69eqpt956y+jnI/XzetIXX3yhatasqczNzbU/ugrTmDFjVK1atZSlpaXS6/Wqfv366sMPP1TJyclamqL2maQGxeOX96iLdw5luh2/vKdEBEVZT1EIIUqo1PUUf7+8G+syhkzT3om7h3slH1lPUQghxLMtWSWRrLJYTzGL958VEhSFEKKEk6D4LwmKQghRwiWrZJJV5kMusnr/WSFBUQghSrhk9ZAkZZZlmpJAgqIQQpRwKS3FrG6floyWogzezwF/f/9sr3AuhCjesvP7rtPpWLRoUY7Lzm2+/JL6TDGrrSSQlqIQQuRSZGQkLi4uhV2Np5ZMNjralJAZbSQoPiMePHhQZOauFKKkaNmyZWFXIY2EhATMzMxytBB1MklZBr2SEhTl9ulTOHbsGJ07d8ZgMGBjY0OfPn24ePGi9v6wYcNo166d9vrmzZuYmJgYzX169+5dzMzMjOY+PHnyJD179sTGxgaDwUC3bt04c+aM0bF1Oh0LFixgypQpVKxYkfLly+fjmQpRckVERNC4cWMMBgPPPfec0So3T94GVUoxZ84cKlasiJWVFX5+fuzevRudTkdERIRRucnJycyaNYsKFSrg4ODA0KFDuXfvnlGaqKgoXnrpJRwcHLC0tDRadixV1apVefPNN3n//fdxcXHB0tIyx2tKKpWUra0kkKCYS5cuXaJ9+/bcunWLNWvWsHz5cv7v//4PDw8P7ty5A0D79u05fPiwtizM3r170ev1HDlyREuzf/9+EhMTad++PQBnz56ldevW3L59m1WrVrFu3Tpu3LiBt7c3CQkJRnX48MMP+eOPP1ixYgVr1qwpwLMXomS4du0aY8aMYdKkSWzYsIH4+Hh69erFo0eP0k2/dOlSZs2ahb+/PyEhIdSoUYNXX3013bQff/wxf/75J19++SVvv/0269at45133tHej46Opm3bthw9epSlS5eyadMmDAYDXl5eXL9+3aisTZs2sX37dj788EO2bt2KwZD57DRPSkx+mK2tJJDbp7m0ZMkSHj16RFhYmLZ+W+PGjalbty6rVq1i9OjRtG/fnoSEBA4ePIiHhwd79+6lV69ehIWF8dNPP9GlSxf27t2Lq6srFSpUAGD27NnY29vz3Xffaatxt27dmurVq7NixQpGjhyp1cHe3p6QkJASv0CtEPnl9u3b/PDDD9SrVw8Ag8FAhw4dOHjwIG3btjVKm5SUxIIFCxg6dCgLFiwAUhYavnnzJitWrEhTtqOjI2vXrgWgS5cu/N///R8bN27U8gYFBRETE8OhQ4e0O0He3t64urqyaNEi3n//fa2sR48eERoamuNgmCpZqWyMUywZM4JKSzGXfvzxR7y8vIwWNK1Tpw4NGzZk3759AFSrVo3KlSuzd+9eIKWl6OnpSbt27fjhhx+0famtRICwsDB69OhBqVKlSExMJDExETs7Oxo3bmy0NiCkLGUkAVGI/OPk5KQFRIC6desCKbc1nxQVFcXVq1fp0aOH0f6ePXumW/aT64nWrVvXqNywsDA6dOiAvb299l1gamqKh4dHmu8CT0/PXAdESFk6KjmLraQsHSUtxVyKjo6mUaNGafZXqFDB6H5+agsxLi6OX3/9lfbt23Pv3j02btxIQkIChw4d4rXXXtPS37x5k6CgIIKCgtKU/eQ6camtSyFE/rC1tTV6nfo7mPpI5HFXr14FoF
y5ckb7M3ren17Zjz8iuXnzJgcOHMDMLO2g+ho1ahi9ftrvguw8MywpzxQlKOaSvb19mvv6AH///Teurq7a6/bt2xMQEEBERAQODg7UqVOHe/fuMWXKFMLDw0lISDDqjGNvb0+3bt2MbpOmenLleGklClF0ODo6AnDjxg2j/el9T2SHvb09Xbp0MXrOmEqv1xu9ftrvApn79F8SFHOpbdu2fPbZZ0RHR2NnZwfA6dOn+e2333jllVe0dKktww8++EC7TdqoUSMsLS1ZsGABzs7OVK1aVUvv4+PD77//TuPGjTE1NS3QcxJC5F7lypWpWLEiW7duNbplumXLllyV5+Pjw5o1a3Bzc3uqW6PZIUHxXxIUc2n8+PGsXLmSTp06MWPGDOLj45k5cyZVqlTB399fS1enTh3Kly/PDz/8wEcffQSAqakpbdq0ITQ0lEGDBhmVO3v2bJo3b07nzp0ZPnw4FSpU4Nq1a/zwww+0a9eOAQMGFORpCiGyydTUlGnTpjFu3DgqVKhAhw4dCA8PZ/fu3QA5GjcIEBAQwNq1a/Hw8GDs2LFUqVKFGzducPDgQZycnBg/fnye1T1RPSJRZf5HeKJKv8fts0Y62uSSs7MzP/zwA3Z2dgwaNIjhw4fTsGFDIiIi0tzmTG0hPt6hxsPDI80+gJo1a3Lo0CHKli3LyJEj6dy5M1OnTuXevXs0aNAgn89KCPE0Ro8eTWBgIP/973/p1asXJ06cYOHChQDY2NjkqKyyZcty4MABGjVqxJQpU+jUqRPjx4/n/PnztGjRIk/rLeMU/6VTqoT0sxWiEERERNChQwcOHz5Ms2bNAJg1axadOnWidevWhVw7URDeeustFi9ezK1bt4rcrFNxcXHY2Niw5fgcDNYWmaa9dyeeF+q9TWxsLGXKlCmgGhY8uX0qRD5q0qQJkZGRuLm5aftmz56NlZWVBMVn0MmTJ1mzZg2tW7fG3NyciIgIFi1axBtvvFHkAuLj5JnivyQoCpGPypQpUyTnxxT5o3Tp0kRGRvKf//yHO3fuUKlSJSZNmsSsWbMKu2qZkkWG/yXPFMUz7/jx43Tt2pWyZctSunRpateubTQbSGRkJF5eXtoctgMHDjTqRn/+/Hl0Oh3/+9//GDFiBLa2tpQvX54PPvgAgK+//pratWtTpkwZXnzxRWJiYrS8ERER6HQ6fv75Z+DfrvOTJk1Cp9MZzYkZHx9PQEAATk5OWFhY0KhRIzZv3mx0LqnLGWU2HyekzMG5aNEiXF1d0ev1VK9enSVLlhiliYqKom/fvlSoUAELCwuqVauWp503SiIXFxf27NnD7du3efToEefPn+fdd9+lVKmi3f5Q2Vg2qqQ8Uyzan5QQeaB79+5UqFCBFStWYGNjw19//aXNHBIZGYmnpyddu3Zl/fr13Lt3j5kzZ9KzZ08iIyONypkxYwa9e/cmODiYLVu2MGHCBG7cuEFERATvv/8+cXFxjB49msmTJ/PZZ5+lW5fIyEhatWrF6NGjGThwIPDvLCmDBg3i22+/Ze7cudSpU4fVq1fTu3dvtmzZYjRLSup8nFOnTsXGxoZp06bRq1cvzpw5ow30Hjt2LF988QUzZsygRYsW7N+/nylTpmBpacmIESMAGDx4MFeuXOGjjz6iQoUKXLx4UQveomR5lJTIo6TMe5c+SkosoNoUMiXEM+zGjRsKUNu2bUv3/fbt26vWrVur5ORkbd/x48eVTqdTO3bsUEopde7cOQWovn37amkSExNVhQoVlMFgUDdv3tT2T5gwQdna2mqvw8PDFaAOHz6s7QPUwoULjerx66+/KkAtX77caH+rVq1UkyZNtNdDhgxROp1O/f7772mO8eOPPyqllPrrr7+UTqdTn376qVFZU6ZMURUrVlRJSUlKKaUMBoP66KOP0r0uomSIjY1VgFrzfxNUyB/TM93W/N8EBajY2NjCrna+ktun4plWtmxZXFxcmDZtGl9++aXR3JL379/np59+ws/Pj6
SkJG1+SVdXV5ydndPML/n4XJWmpqZUr16dRo0aUbZsWW2/q6srMTEx3L17N0f1/PHHHwHw8/Mz2t+vXz+OHDlitKRQVvNxpo6L6927t3ZOiYmJ+Pj4cO3aNS5dugSkdAJatGgR//nPf/jrr79yVF/xbMnq1ml2OuI8KyQoimeaTqcjLCwMNzc3Ro0ahbOzM82aNWPv3r1ER0eTlJTE+PHjMTMzM9ouXryoBY9U6c1VmZO5MTMTHR2NmZmZ0QTzkDKnpVLK6DllVse8efMmSikcHByMzik1qKee1/r16/H29mbGjBnUqlWLOnXqEBISkqN6i2dDkkrK1lYSSFAUzzxXV1eCg4OJjo4mIiICvV5P9+7dsbKyQqfTMWPGDA4fPpxmmzlzZoHV0d7enkePHhEdHW20/++//0an06UJhFmVpdPp+Omnn9I9r4YNGwIpc3X+97//5ebNmxw6dIjatWvTr18/zp49m5enJoqB5OTkbG0F4ZtvvqFhw4ZYWFjg6urKypUrs8yT2hnuyS03Pb+lo40oMczMzPDw8GDq1Kn06NGDv//+m1atWnHy5EnefffdAq3Hky3J1LX5goODGT58uLY/ODhY62WaXd7e3gDcunWL7t27Z5nexMSE5s2b8+6777Jt2zb++usvqlevnu3jieKvqIxT3LdvH7169eLVV18lKCiIPXv2MGzYMKytrenTp0+W+efNm0eHDh2010/OLpYdEhTFM+23335jwoQJ9OvXjxo1ahAbG8v8+fOpWrUqNWrUYOHChXh5edGvXz/69++PnZ0dUVFRfPfddwwdOhRPT888r5Obmxtbt26lXbt2GAwGateuTYMGDXjxxRcJCAjgwYMH1K5dmzVr1rB//362bt2ao/JdXV0ZNWoUL7/8MpMmTaJFixY8evSIP/74g/DwcLZs2UJsbCydO3fm5Zdfpnbt2jx8+JClS5dia2tLkyZN8vycRdGmkpNJTs5i6agCaCm+8847tGjRguXLlwPQoUMHzpw5w9tvv52toFirVq2nHhcsQVE80ypWrEjFihWZP38+ly9fxsbGhnbt2rFmzRpMTU1p3bo1+/btIzAwkKFDh/Lw4UMqV66Mt7c3NWvWzJc6ffLJJ4wdOxZfX18ePHhAeHg4np6erFmzhunTp7NgwQJu375NnTp12LhxY7Zae0/66KOPqF27Np9++ilz5szBysqK2rVrax15LCwsqF+/PkuXLuXixYtYWlrSrFkzwsLCcHBwyOtTFkXco6RHlErKfPmprIZsPK2EhATCw8ONxhAD9O/fn6+++orz588brSiUX2TuUyGEKKFS5z5d+uNgLK3MM0374O5DRrdbzaVLl4zmPtXr9WnWd8yNEydOUK9ePUJDQ+nSpYu2/88//8TV1TXN/sedP3+eatWq4eDgwO3btylbtiw9e/bkvffeS9N5LSvS0UYIIUq45OSkbG2QskKQjY2Nts2fPz9P6pDayezJTmWp69Xevn07w7x6vZ433niDL774gj179jBx4kQ2bNiAt7c3jx7lrIUrt0+FEKKEy8ncp+m1FDMSGxvL1atXszz+03bscnR0ZNmyZdprDw8P6tWrx/PPP8/mzZvp27dvtsuSoCiEECVcdsYhpr5fpkyZbC8dFRwczGuvvZZlupMnT2otwtjYWKP3UluQOb0N2rVrVwwGA7/88kuOgqLcPhVCiBIu+Z/ep5lvOe99+uqrr6KUynKrU6cONWrUwMzMjFOnThmVkfq6Tp06eXKuWZGgKIQQJVxS8iMSkzLfkpLzt/epXq+nQ4cObNy40Wj/+vXrcXNzy3HP0+3bt3Pv3j2aN2+eo3xy+1QIIUq4lNunmbeRCmKat7feegtPT09GjhxJ3759CQ8PZ926daxfv94oXalSpRgyZAgrVqwAYMKECZiYmNCyZUtsbW05dOgQ8+fPp1mzZrzwwgs5qoMERSGEKOGyM41bQUzz1rZtW0JCQpg5cyYrVqygSpUqfPHFF2kmyk
9KSiIp6d8gXbduXZYtW8Znn33G/fv3qVSpEsOGDWP27Nk5XstSxikKIUQJlTpOcVZoZywMZpmmjb/3iFm+u4iNjc12R5viSFqKQghRwqV0pMn89mlW08A9KyQoCiFECZekkrMxJKNgVskobBIUhRCihEtKekRiUuZP0pKSEguoNoVLgqIQQpRwycmK5OTMg2JW7z8rJCgKIUQJl5Np3p51EhSFEKKEk5bivyQoCiFECZesshEUS8joPQmKQghRwiUrlWXQk6AohBCiREhMTMI0UZdlmpJAgqIQQpRw0lL8lwRFIYQo4VRy1h1pSkjnUwmKQghR0qUMycj89qkMyRBCCFEiyJCMf0lQFEKIEk6C4r8kKAohRAmnlCKrVQRLyiqDma8VIoTIF/7+/ri7uxd2NYQAIDEpicTELLYkGZIhhMgnb731Fvfu3SvsaggByJCMx0lQFKIQ1KhRo7CrIIRGnin+S26fCpEDqbc9IyIiaNy4MQaDgeeee45ffvlFSxMfH09AQABOTk5YWFjQqFEjNm/enG45qWJiYnjttdeoVKkSFhYWODs7079/f6M8UVFRvPTSSzg4OGBpaUn79u2NjitEbiUnJ2drKwkkKAqRQ9euXWPMmDFMmjSJDRs2EB8fT69evXj06BEAgwYN4tNPP2Xy5Mls2bKFunXr0rt3b7Zt25ZhmQEBAWzfvp158+axa9cuFi5ciF6v196Pjo6mbdu2HD16lKVLl7Jp0yYMBgNeXl5cv349389ZPNvUP7dPM9tKSkcbuX0qRA7dvn2bH374gXr16gFgMBjo0KEDBw8epEyZMoSEhLB8+XJef/11ALp06cL58+eZPXs2PXr0SLfMQ4cOMXDgQIYMGaLte7ylGBQURExMDIcOHaJ8+fIAeHt74+rqyqJFi3j//ffz63RFCSC3T/8lQVGIHHJyctICIkDdunWBlNubt27dAsDPz88oT79+/Rg/fjz37t3DYDCkKbNJkyasWrUKR0dHunTpkqZnalhYGB06dMDe3p7ExEQATE1N8fDw4PDhw3l6fqLkSUxKQpdF51LpfSqESJetra3Ra3NzcyDlWWJ0dDRmZmbY29sbpalQoQJKKWJiYtINikuXLsXe3p7FixczadIknJ2dmTZtGm+88QYAN2/e5MCBA5iZmaXJK512xNNKzsbcpyXkkaIERSHykr29PY8ePSI6Oho7Oztt/99//41Op0sTUFPZ2NgQFBREUFAQx44d48MPP2TkyJG4u7vTrl077O3t6dKlC++8806avI8/exQiN+T26b+ko40Qeaht27YABAcHG+0PDg7WeqtmpX79+ixZsgSAkydPAuDj48OJEydwc3OjWbNmRlv9+vXz+CxESZNVJ5vsjGN8VkhLUYg81KBBA1588UUCAgJ48OABtWvXZs2aNezfv5+tW7dmmK9Nmzb06tULd3d3TE1NWb16Nebm5rRr1w5I6Z26du1aPDw8GDt2LFWqVOHGjRscPHgQJycnxo8fX1CnKJ5BySobLUUJikKI3FizZg3Tp09nwYIF3L59mzp16rBx40a6d++eYZ42bdqwevVqzp07h4mJCfXr1+ebb77Bzc0NgLJly3LgwAFmzpzJlClTuHXrFuXLl6dly5b06tWroE5NPKNUctbrJZaQlaPQqZIy+EQIIYSRuLg4bGxs8Hm3CqUsMn+alhifzO6ZF4mNjaVMmTIFVMOCJy1FIYQo4VJ6n2adpiSQoCiEECWcSlaoLJ4pZvX+s0KCohBClHDSUvyXBEUhhCjhpKPNvyQoCiFECSfrKf5LgmI+Sk5O5sqVK1hbW6PT6Qq7OkKIIk4pxZ07d3BycsLEpODmVklKAl1i1mlKAgmK+ejKlSs4OzsXdjWEEMXMpUuXqFy5coEdTzra/EuCYj6ytrYG4K8//9T+nVfy88dT2rT/0j3Mnwcp6mxcvpQLcPE/4flS7pFvfsqXcgFuP8i/65FfdLq8b8nFq0cEqh15/n2RFelo86
+nCor+/v78/PPP/P7773lVn2dK6i1Ta2vrPB/sKkGxYORbULTKv0/QytwyX8q11JnnS7kAFrq0q38UdSb5EBQBUBT44xalUras0pQETxUU33rrLe7du5dXdRFCCFEIZJWMfz1VUCyoddwePHiApWX+/PUrhBAlXjaGZFBCbp8+Vfvf399fWyF81apV6HQ6jhw5gq+vLwaDgVq1arF69eo0+Xbs2EGbNm0oXbo0dnZ2eHp6cuTIEQAiIiLQ6XTs2LGDPn36UKZMGW0V85iYGEaOHImjoyN6vZ6mTZsSFhaWpuyOHTtSvnx5ypQpQ4sWLfj222+N0sTExPDaa69RqVIlLCwscHZ2pn///kZpoqKieOmll3BwcMDS0pL27dvzyy+/PM3lEkKIIin1mWJWW0mQ5zfFBw0aRKdOndiyZQuNGzfG399fWxMOYP369XTv3p3y5cuzbt061q5dS5s2bbh8+bJROcOHD6dGjRps3ryZiRMn8vDhQzp27Mj27duZO3cu27Zto27dunTr1o1jx45p+c6dO0f37t353//+x6ZNm2jTpg1du3YlIiJCSxMQEMD27duZN28eu3btYuHChUYLtUZHR9O2bVuOHj3K0qVL2bRpEwaDAS8vL65fv57huSckJBAXF2e0CSFEUZeUpLK1lQR53vv0zTffZOTIkQC0bt2aHTt2sGnTJmbOnIlSiokTJ9KpUyc2b96s5enatWuacnr06MF7772nvV65ciVHjx7l119/pW7dugB07tyZP//8k3feeYcNGzZox0+VnJxMhw4dOH78OJ999hmenp4AHDp0iIEDBzJkyBAt7eMtxaCgIGJiYjh06BDly5cHwNvbG1dXVxYtWsT777+f7rnPnz+f2bNn5+h6CSFEYZMZbf6V5y3FTp06af82GAy4uLgQFRUFwOnTp4mKiuKVV17Jspxu3boZvQ4LC6N+/fq4urqSmJiobR07duTw4cNauqioKIYMGUKlSpUoVaoUZmZmhIWF8ccff2hpmjRpwqpVq1i0aFG6PWfDwsLo0KED9vb22nFMTU3x8PAwOtaTpk2bRmxsrLZdunQpy/MUQojClqyycfu0ABqK3333HQMHDqRGjRrodDqjRk5WYmNjGTZsGPb29lhbW9OnTx+uXr2a4zrkeUvR1tbW6LW5uTnx8fEA3Lp1CwAnJ6csy6lQoYLR65s3b3LkyBHMzNJ23TY1NQVSWoY9evQgNjaWOXPmULNmTQwGA2+//TYXL17U0i9duhR7e3sWL17MpEmTcHZ2Ztq0abzxxhvasQ4cOJDusTLrXKTX641uwwohRHFQVAbvf/vtt/z66694eHhw+/btHOXt168fx48fZ/ny5VhYWDBjxgx8fX35+eefKVUq+6GuQAfvly1bFkiZ6SUrT47Tsbe3p0GDBqxYsSLDPH/99RdHjhxhy5Yt9OzZU9v/4MEDo3Q2NjYEBQURFBTEsWPH+PDDDxk5ciTu7u60a9cOe3t7unTpwjvvvJPmGBL0hBDPmuRk0BWBwfsLFy5k8eLFAOzZsyfb+SIjI9m1axe7du3S7lbWrl0bNzc3QkJC6Nu3b7bLKtCgWLt2bSpXrszKlStzVEkAHx8fdu7ciZOTU4YtzdTgZ27+7yDjCxcu8NNPP+Hq6ppunvr167NkyRJWrFjByZMnadeuHT4+PqxZswY3NzcMBkOO6imEEMVNURm8n9v5XkNDQ7G1taVjx47avtq1a9OoUSN27txZdIOiTqdj0aJFDBgwgN69ezN48GD0ej2RkZE0b96c559/PsO8gwcP5tNPP8XT05OJEyfi6upKTEwMR44c4eHDh8yfP586depQuXJlpk6dSlJSEnfv3iUwMJBKlSoZldWmTRt69eqFu7s7pqamrF69GnNzc9q1awek9E5du3YtHh4ejB07lipVqnDjxg0OHjyIk5MT48ePz9frJIQQBenRg2SSErMYvP8o5f0ne9UXhcdGp06donbt2mnuMLq5uXHq1K
kclVXgc5/269eP0qVLM3fuXPr374+FhQVNmjShV69emebT6/Xs2bOHWbNmMXfuXK5evYqDgwONGzfWervq9XpCQkIYNWoUfn5+ODs7M3PmTPbs2cPPP/+sldWmTRtWr17NuXPnMDExoX79+nzzzTe4ubkBKbd5Dxw4wMyZM5kyZQq3bt2ifPnytGzZMst6CiFEcWFubk7FihU5+9W1bKW3srJKs8hBYGAgs2bNyofaZV90dHSa/iwAdnZ2OX42qVOqpMxoV/Di4uKwsbHh72vXitXcp/mlWM6pWgwvtC4xfx7+JJ3I2ZdLTnzXNf1hTk8r6s6NfCkXIDE5i7WWciFePWKq2kZsbGyef2dkeMz4eB4+fJittEqpNK2xzFqKsbGx2eoBWr16daPHXgBVq1bl+eef5+OPP84yf8eOHTE1NU0zUcubb76ZZvRBVmSVDCGEKMEsLCywsLDIl7KDg4N57bXXskx38uRJ6tSpk+vj2NnZpTsELjo6Gnt7+xyVVXCrWOaxx6eYy28xMTHMmjWLEydOFMjxhBDiWfDqq6+ilMpye5qACFCnTh1Onz7Nkzc+T506leOyi21QLEgxMTHMnj1bgqIQQhRBvr6+REdH8/3332v7/vjjD44cOZLujGmZkdunQgghioQLFy5os4bdv3+fM2fOsHHjRgD69OmjpStVqhRDhgzRxq23atWKzp0788orr7B48WJt8H6DBg148cUXc1SHYt9SjIiIoHHjxhgMBp577jmjlSyUUixatAhXV1f0ej3Vq1dnyZIlRvlPnTpF//79cXZ2pnTp0tStW5fFixeT/M9I1fPnz1OtWjUA/Pz80Ol06HQ6zp8/X2DnKIQQJUF4eDh+fn74+flx48YNvv32W+3145KSkkhKSjLat379ejp27Mjw4cMZOHAgtWrVYufOnTmazQaKeUvx2rVrjBkzhqlTp2JjY8O0adPo1asXZ86cwczMjLFjx/LFF18wY8YMWrRowf79+5kyZQqWlpaMGDECgMuXL1O7dm0GDRqEtbU1R48eJTAwUBvj6OjoSEhICC+++CLz5s2jQ4cOADg6OqapT0JCAgkJCdprWSVDCCGyz9/fH39//yzTpTdowsbGhhUrVmQ661l2FOugePv2bX744Qfq1asHpExA3qFDBw4ePIijoyMff/wxy5cvZ/jw4UDKrDj3799n9uzZDB8+HBMTE7y9vfH29gZSLnTbtm25f/8+H3/8MYGBgej1eho3bgxArVq1aNmyZYb1kVUyhBCieCvWt0+dnJy0gAhoS0pFRUWxe/duAHr37m20qoaPjw/Xrl3Tuu/Gx8cTGBhIzZo10ev1mJmZMWPGDK5evcrdu3dzVB9ZJUMIIYq3Yt1STG9FDkgJdDdv3kQphYODQ7p5L126hIuLC1OmTOHzzz8nMDCQpk2bYmtry9atW3n33XeJj4/Hysoq2/UpCtMdCSGEyL1iHRQzY29vj06nY9++fWlmSoCUyWIhZXDp66+/zpQpU7T3duzYUWD1FEIIUXQ8s0Ex9TnhrVu36N69e4bpHjx4YBQ0k5KS+Prrr43SPN4CFUII8ex6ZoOiq6sro0aN4uWXX2bSpEm0aNGCR48e8ccffxAeHs6WLVuAlDnzPv/8c+rWrYuDgwPLli0z6kEKULFiRWxtbfnqq6+oVq0aer2eBg0apNsCFUIIUXwV6442Wfnoo4949913+frrr+nWrRsvvfQS69evx8PDQ0uzdOlSPDw8GD16NMOGDaN+/fpMnz7dqBwTExNWrlzJuXPn8Pb2pnnz5tlaKFkIIUTxIqtk5KP8XCWjWMrHlbuTj93Ml3L/3vZL1olyITribL6UC3Dit+P5Uu6tB7H5Ui5AsnwNASmrZEznmwJdJUMYe6ZbikIIIURO5Cgo5vfKFI0aNcrWbAZCCCFEfpCWohBCCPGPYhUUlVJpeoYKIYQQeSVXQTE0NBR3d3csLCxo2rQpBw4cMHp/1apVNGjQAAsLCypVqs
SMGTPSzGi+f/9+mjZtioWFBe7u7oSGhqY5Turt2p07d9KwYUP0ej3ffPMNACEhITRq1AgLCwucnJwICAhIM47wwoUL9OnTBxsbGwwGA507d+bYsWNGaapWrcqbb75JUFAQzs7OWFtb4+/vT0JCAkePHqVNmzbaChxP5hVCCPFsyfE4xatXrzJy5EhmzZqFnZ0dCxYsoHPnzvz555+UL1+eDz74gMmTJzN+/HgWL17MyZMntaC4YMECIGV1i86dO1O/fn02bNhAdHQ0b7zxBvfu3aNRo0ZGx7ty5Qpjxoxh5syZVKlShSpVqrBt2zb69OlD//79WbBgAadOnWL69OlcvHhRW3vrzp07eHp6YmJiwvLly7GwsGDu3Lm0b9+e3377DWdnZ+0YW7duxd3dnU8//ZSzZ88SEBCAubk5kZGRBAQEUKFCBaZMmYKfnx8nTpzAxCT9vyVklQwhhCjechwUb9++TXBwMF5eXgB4eHjg7OzMkiVLmD59OoGBgUyePJl58+YBKYPjzc3NCQgIYNKkSZQtW5agoCB0Oh2hoaHY2NgA4OzsrM1C87jo6GhCQ0Np0aKFtq9v3760bNmSdevWAdClSxdKly7N66+/zrFjx6hfvz4rV67kwoULHD9+HDc3N62uVapUISgoiMWLFxsdZ+vWrdpg/IiICD7//HNCQ0Pp0qULAMnJyXTv3p1jx47RsGHDdK+NrJIhhBDFW45vn9rY2GgBMfW1j48PBw8eZP/+/dy9exc/P780K1M8ePCA33//HYCDBw/SoUMHLSACeHl5YW9vn+Z4ZcuWNQqId+/e5ejRo0arMAP069cPgH379gHw448/4u7urgVESJkPtWPHjlqaVB4eHkaz07i6umJiYmJ0nq6urgCZrnwhq2QIIUTxluOWYrly5dLsq1ChAidPnuTmzZQB1E2aNEk3b2qQuHr1KjVr1kzzfvny5dMt+3ExMTEopdLst7GxQa/Xc/v2bSClhflkmtTyUoNzqvRW27C0tDQKlNmZ/1RWyRBCiOItx0Hxxo0bafb9/fffODo6ai29kJAQo2d2qapVqwakrFp//fr1NO+nt0+n0xm9trW1RafTpUkbGxtLQkKCVgd7e3tOnz6dbl3Ta5EKIYQQOb59Ghsby549e4xe7969mxYtWtCqVStKly5NVFQUzZo1S7OVLVsWgOeee47w8HBiY/+dNmrPnj1aKy8zVlZWNGrUSOtQk2rDhg0AtG3bVvv/sWPHjAJjdHQ0u3fv1tIIIYQQj8txULS3t2fYsGGsXr2abdu24evri1KKcePGYWtry5w5c5g8eTJTpkwhNDSUsLAwli9fjq+vL/fv3wdg3LhxJCcn4+vry7Zt2/jyyy955ZVXtKCZlVmzZhEZGclLL73Et99+y4cffsi4cePo3bs39evXB2Do0KG4uLjQrVs3vv76a7Zs2UKnTp0oVaoU48aNy+lpCyGEKAFyfPvU0dGR9957j0mTJnHmzBnq1avHrl27tOd3EyZMoFKlSnzwwQcsXboUMzMzatSowfPPP689l3N0dCQ0NJQxY8bg5+dHjRo1+OSTT5gxY0a26tCjRw+Cg4OZM2cOPXv2xN7enuHDhzN//nwtjbW1NREREQQEBDB8+HCSkpJo06YNe/fuTffWbn5InWv9zp07BXK8Ii8/JwS/lz/X+E7C/Xwp925i/q3N+UA9zJdy49WjfCkXZELwVPGkXGNZp6HwyCoZ+SgqKqrAArAQ4tlx6dIlKleuXNjVKJEkKOaj5ORkrly5grW1dZoOQ+mJi4vD2dmZS5cu5emyMcWt3Pwsu7iVm59lS53zv9yclq2U4s6dOzg5OWU4SYjIXzm+fSqyz8TEJFd/7ZUpUyZf1lIrbuXmZ9nFrdz8LFvqnP/l5qTsx8dvi4Inf4oIIYQQ/5CgKIQQQvxDgmIRotfrCQwMzPNZcYpbuflZdnErNz/Lljrnf7n5XbbIe9LRRgghhPiHtBSFEEKIf0hQFEIIIf
4hQVEIIYT4hwRFkW/++usvRowYQaNGjShVqhTu7u7ppluxYgWurq5YWFjQsGFDtm/fnq3yr1y5Qu/evbG2tsbe3p5XX32VuLi4vDyFAnP37l0qV66MTqfj559/zjStUooFCxZQpUoVLC0tadWqFQcOHCigmuaf7P68POlZvR6icEhQFPnm+PHj7Nixg5o1a1K3bt1003z99de89tpr9OvXj9DQUFq1akWvXr2y/FJ79OgRnTt35o8//mDdunX85z//YdeuXQwcODA/TiXfvfPOOyQmJmYr7XvvvUdgYCDjx49n+/btODo60qlTJ86ePZvPtcxf2fl5Sc+zej1EIVFC5JOkpCTt30OGDFH16tVLk8bV1VUNGDDAaF+rVq2Ur69vpmWvW7dO6XQ6derUKW3frl27FKAOHjz4lDUvWCdPnlQGg0EtX75cAerw4cMZpn3w4IEqU6aMmjZtmrYvISFBubi4qDfeeKMgqptvsvPz8qRn+XqIwiEtxUIUGhqKTqdDp9MZrRDSq1cvdDodBoOBP/74oxBr+HSymrvx7Nmz/PHHH/Tt29dof//+/fn+++9JSEjIMG9oaCgNGjSgdu3a2r6OHTtib2/Pzp07n67iBWz06NGMGDHC6Fwysn//fuLi4oyumbm5OS+++GKxO+8n5Wauz2f5eojCIUGxEPn6+jJ8+HAAFi5cyLFjx9iwYQNbtmwBUm4Lubq6FmIN89epU6cAqFOnjtF+Nzc3Hj58yLlz5zLN+2Q+nU5HnTp1tHKLg40bN3Ls2DHefvvtbKXP7JpdvHiRBw8e5HkdizK5HiKvSVAsZIsXL6Z69eo8evSIoUOHMnr0aAB8fHwYNWpUIdcuf0VHRwNga2trtN/Ozg6A27dvZ5r3yXypeTPLV5Tcv3+fgIAA5s2bl+1JqKOjo9Hr9VhYWBjtt7OzQymlXdOSQq6HyGsSFAuZlZUVq1evxsTEhF9++YXr169jY2PDypUrs7XclCi+3n33XSpUqMDQoUMLuypCiH9IUCwC2rRpQ7NmzbTXPXr0KBELjKa2CGNjY432p/51b29vn2neJ/Ol5s0sX1Fx4cIFFi9ezOzZs4mNjSUmJoa7d+8CKcMzUv/9JDs7OxISEoiPjzfaHx0djU6n065pSSHXQ+Q1CYpFwJdffsmhQ4e012vXrmX//v2FWKOCkfoc6MlngKdOncLc3Jzq1atnmvfJfEopTp8+neb5UlF07tw5Hj58SLdu3bCzs8POzo7u3bsD0KFDB3x8fNLNl3pup0+fNtp/6tQpbZxeSSLXQ+Q1CYqF7NKlS4wdOxZIaSG6ubmRnJzMkCFDuH//fiHXLn9Vr14dV1dXgoODjfavX78eb29vzM3NM8zr6+vLr7/+yp9//qnt+/7777l16xZdu3bNtzrnlUaNGhEeHm60LVmyBIDly5ezbNmydPO1bt2aMmXKGF2zR48eERISUizOO6/J9RB5rpCHhJRoycnJytvbWwHKzs5OXb16VUVGRioTExMFqJEjRxZ2FZ/KvXv3VHBwsAoODlaenp7K2dlZe339+nWl1L/jDd9++20VHh6uRowYoUqVKqX279+vlXP+/HllamqqZs+ere17+PChcnd3V/Xr11fffPONWr9+vXJ2dlbdunUr8PPMK+Hh4WnGKXp5eakaNWoYpZs/f77S6/UqKChIff/996p3797K2tpanTlzpqCrnKey8/NSkq6HKBwSFAvR0qVLFaAAtWrVKm3/hAkTFKB0Op0KCwsrxBo+nXPnzmnn9+QWHh6upfviiy9UzZo1lbm5uRbk0isnMDDQaH9UVJR68cUXlZWVlbK1tVWvvPKKio2NLYAzyx/pBUUPDw/l4uJilC45OVnNmzdPVa5cWen1etWiRQujPyKKq+z8vJSk6yEKh6ynKIQQQvxDnikKIYQQ/5CgKIQQQvxDgqIQQgjxDwmKQgghxD8kKAohhBD/kKAohBB5yN/fH3d398KuhsglGZIhhBB56MyZM9
y7d48GDRoUdlVELkhQFEKIYighIQEzM7NcLc4sMiZXUwgh+Pe2Z0REBI0bN8ZgMPDcc8/xyy+/aGni4+MJCAjAyckJCwsLGjVqxObNm9MtJ1VMTAyvvfYalSpVwsLCAmdnZ/r372+UJyoqipdeegkHBwcsLS1p37690XEBqlatyptvvsn777+Pi4sLlpaWxWbt0OKkVGFX4P/bu/uomLM/DuDvbxlFk9FMTcTWyGYm5KQ1NqUtT4mRisXpoHDW7mIfaLdVrUU2Nlsd0WKd46Fi7dlCJQ+d7PFURK2TpVhCrNpMtYY2amum+/tD8z2+zUNp2fan+zpn/pj7vY9fztzu/d75DEVR1H/FgwcP8MknnyAiIgICgQCRkZEICgrC7du3wePxMHfuXOTk5GD9+vWQyWRITU3FzJkzkZmZienTp+utMywsDMePH0dsbCwkEgmqqqpw/Phx9rpKpcLYsWPB5/ORlJQEgUCApKQkjB8/HmVlZRCLxWzegwcPwsnJCZs3b4apqSksLCxe+T3pdroyxhxFUdR/RWhoKGEYhpSUlLBp2ni0eXl55NdffyUAyPfff88pN2bMGOLm5sapZ9iwYez7YcOGkbCwMIPtrl69mggEAqJUKtm0xsZGYm9vT8LDw9k0BwcHIhKJSH19/T8aJ2Uc3T6lKIpqZWdnh2HDhrHvhw4dCuDZ9mZeXh4AYNasWZwyc+bMQXFxMZ48eaK3Tjc3NyQnJyM+Ph4lJSU613NzczFu3DgIhUKo1Wqo1WqYmprC29sbRUVFnLw+Pj50dfiK0UmRoiiqVd++fTnvtb/p2djYCJVKBR6PB6FQyMlja2sLQggePXqkt86kpCTMnz8fCQkJcHFxgb29PbZv385er62tRWZmJng8Hue1d+9e3L9/X6ct6tWizxQpiqI6QCgUorm5GSqVClZWVmy6UqkEwzA6E6qWQCBAYmIiEhMTcfXqVWzevBlLly7F8OHD4eXlBaFQCD8/P3z99dc6Zc3MzDjvGYZ5qWOidNGVIkVRVAeMHTsWAJCens5JT09PZ0+rtsfFxQWbNm0CAFy/fh0AMHHiRFy7dg3Ozs4YNWoU5+Xi4vKSR0G1h64UKYqiOmDEiBGYMWMGwsLC0NDQAKlUin379uH8+fPIysoyWM7T0xNBQUEYPnw4TE1NkZqaip49e8LLywvAs9OpP/zwA7y9vfHpp5/C3t4eNTU1uHjxIuzs7LBixYp/a4gU6KRIURTVYfv27UNUVBRiY2Px8OFDyGQyHDhwAP7+/gbLeHp6IjU1FeXl5TAxMYGLiwuys7Ph7OwMABCJRLhw4QJWrVqFlStX4s8//4RYLIa7uzuCgoL+raFRWl19/JX672t7xPy/3IdNmzaRo0eP/gs96hxvb2+iUCi6uhvtKi4uJmvWrCFPnjzhpO/Zs4cAIDU1NS+lHaVSSfh8Prl69arRfJs2bSLPf1yVl5cTAOyLYRhiZ2dHgoODyd27dzllJ06cSGJiYl5Kf6nXH32mSLXrq6++wv79+7u6Gx2SmJiIY8eOdXU3DNq2bRsSEhK6uhvtunz5MqKjo/H06VNOukKhQEFBgcFDJS9q/fr18PHx6XQA7Q0bNqCgoAD5+fmIjY1FQUEBpk6dCo1Gw+aJiopCfHw8VCrVS+kz9Xqj26dUuwYPHtzVXXhtaL/31hUaGhrQq1evf1SHjY0NbGxsXkp/6uvrsWvXLuzdu7fTdTg5OcHd3R0A4OHhgT59+iAwMBA3btxg7/W4ceNgZWWFlJQULF++/GV0nXqN0ZUihdLSUkydOhUikQi9e/eGVCrFt99+y17X91M4+fn5GDlyJMzNzTFixAicOHECrq6uWLBggU45Y7EkASAhIQFyuRwCgQBisRjTpk3DzZs3X3gcEokE9+7dw9atW8EwDBiGQXJyMgCgpaUFMTExkEgkMDMzg0wmw44dOzpUr4+PD6ZNm4YDBw5AKpWCz+dj/PjxuH37Niffw4cPsWjRIjZ+pY
eHB86ePau3Lq2KigrMnj0btra2MDc3x6BBg3QOVly/fh0BAQEQCASwsLCAQqHQabutu3fvsuNfvHgxRCIRRo8eDQA4evQoJk2aBLFYjD59+uDtt99GTk4OWzY5ORkLFy4E8GwSZBgGEomEvcYwDGpra19o3PocOHAAADBlyhROel1dHUJCQmBpaQkbGxt88cUXUKvV7dYHAJaWlgCA5uZmTvqsWbOQkpLSoTqo7o1OihT8/f2hUqmwa9cuHD16FJ9//rnB6BwAUFVVBT8/P1haWiItLQ3h4eFYsmQJKisrdfJqY0mGh4cjLS0NjY2NCAoK4nxoVVRU4KOPPkJWVhZ27tyJlpYWeHh4vHCw44yMDPTr1w/vvvsuCgoKUFBQAIVCAQAIDw/H2rVrsWDBAmRnZ8PX1xcffvghvvvuuw7VffnyZcTFxSE2NhbJycm4desW5s2bx17XaDSYMmUKsrOzsXHjRqSnp4PP52PSpEk6fwQ8LyQkBFeuXMGWLVuQk5OD6OhoztbfnTt32HuRnJyM/fv3o6amBhMmTMDff//dbr8jIyNBCMGPP/6IuLg4AEB5eTn8/f2xd+9eHDx4EJ6enpg6dSpOnz4N4NkW6apVqwAAOTk5KCgo0Al6/U/HDQA///wz3NzcYG5uzklftGgRMjIyEBsbi5SUFFy7dg2JiYl662hpaYFarUZTUxOuX7+OtWvXQiaT6fwR5+HhgcuXL6Ompqa9W0Z1d139UJPqWjU1NQQAOXz4sME8bQ+5hIeHE4FAQOrq6ti0vLw8AoCEhoZyyhmLJamPWq0mT58+JXw+n+zYscNgHwxxcHAgy5Yt0xkjj8cjERERnPTg4GBiY2ND1Gq10Tq9vb2JhYUFqa6uZtO0B07u379PCCEkKyuLACA5OTlsnqamJmJvb09mzJjBqev5gzYWFhZky5YtBtsOCQkhjo6OpKGhgU2rrq4mfD6fbN261WA57UEUPz8/o2PTaDSkubmZ+Pr6kuDgYJ3xtT1Q0za9o+PWZ8iQITr/VqWlpYRhGLJr1y42Ta1Wk0GDBhk9aKN92dvbk9LSUoP348iRI0b7RFF0pdjNiUQiODg4IDIyEikpKaioqGi3TFFREcaNG8duVQHPvtjcNvwVYDyWpNaFCxcwadIkiEQi9OjRA71790Z9fb3RLVRtjEjty5iLFy+iublZb8zKmpoath2NRmOwTldXV86ztLbjyMvLQ58+fTB58mQ2D4/Hw4wZM5Cfn2+wb25uboiPj8f27dtx69Ytneu5ubmYPn06evTowfbLysoKI0eO1ImLqY92pfy8iooKhIaGYsCAAejRowd4PB5yc3M7tWXd2XEDz3Yc2j6fLCoqAiGE81UEU1NTBAYG6q1j48aNKCoqQmFhITIyMmBnZwc/Pz+dXQtra2u2TYoyhk6K3RzDMMjNzYWzszOWLVuGN954A6NGjTL6TEjfhxkAzk/caBmLJQkAv//+O3x9faHRaLBjxw6cO3cORUVFEIvFbB592saJNEZ76rBt3Ejte+027eDBgzl13r17t8PjUKlUesdva2trdBv4p59+woQJE/Dll1/CyckJMpkMhw4dYq/X1tYiMTFRZ7x5eXk6cTH1aTvmlpYWTJ8+Hfn5+Vi3bh1OnTqFoqIiTJkyxej9NqSz4wae3bu2YcyqqqrA4/E4YdT0jUPL0dERo0aNglwuR2BgIA4fPozKyko2aoyWtp2GhoZ2x0R1b/T0KYUhQ4YgPT0dzc3NOH/+PKKiouDv74/Kykrw+Xyd/P3799f7bKa6uvqF287JyUF9fT0OHTrETjxqtbrdD9SOrJK0tCvY6upqDBgwgE1XKpWc69nZ2ZzndHZ2di/Uhr7xK5VKvStorf79+2P37t3YuXMnLl26hJiYGMyZMwc3btyAo6MjhEIhFAoFli5dqlP2+ZW6IW1jZd66dQvFxcXIzMxEQEAAm97ZyaKz49aWbRtEu3///gbji3aEjY0NrK2tUVpayknXti
MSiTpUD9V90ZUixeLxePD29kZERATq6urwxx9/6M0nl8tx8uRJ/PXXX2xaXl5ep34FvKGhAQzDcFZ7aWlp7W6Jto0RqdWzZ0+dFc/o0aPB4/F0YlampaVBLBZjyJAhAJ7FpXy+Tu1qsCPGjh2Luro65ObmsmlqtRoZGRlszExjTExMIJfLERMTA7VazW6lTpw4ESUlJRg5cqTOmKVSaYf7p6Wd/J4f271793Du3DlOvrYrYUP+ybilUinKy8s5aXK5HAA4B3s0Gg0yMzON1qWlVCpRW1vLbpdqaVf9nblnVPdCV4rd3JUrV/DZZ59hzpw5GDx4MB4/foxvvvkGEonE4PcTV6xYgW3btkGhUCA8PByPHj1CdHQ0rK2tYWLyYn9njR8/HgCwcOFCfPDBBygtLUVCQkKnvxzu7OyMkydP4sSJE7CyssKgQYNgbW2Njz/+GHFxcTA3N4e7uzuOHTuG/fv3IykpCaampp1q63kKhQKjR4/GvHnzEBsbC1tbWyQlJaGqqgpRUVF6yzx+/BiTJ0/G/PnzIZVK0dTUhKSkJPTt2xdubm4AgOjoaMjlckyePBnvv/8+bG1t8eDBA5w5cwZeXl4IDg5+oX7KZDIMHDgQERER0Gg0qK+vx5o1azgraABsCLKtW7ciMDAQvXv31hucujPj1vL09ERaWhonbejQoQgKCsLy5cvR2NgIiUSCbdu2oampSW8dZWVluHDhAgghqKysRFxcHBiGweLFizn5fvnlF/D5fLi6urZ3i6jurqtP+lBdS6lUknnz5hFHR0diZmZGxGIxmTlzJrl58yabR9/Jz7NnzxJXV1fSs2dP4uzsTI4cOUIkEglZvny50XIqlYoAIHv27GHTUlNTiaOjIzE3Nyfu7u6ksLBQ5xRpR0+flpSUEC8vL2JpaclpR6PRkHXr1hF7e3vC4/GIk5OTzi+oG6IvNFtxcTEBQE6dOsWm1dbWkgULFhChUEjMzMzImDFjyOnTpw3W1djYSN577z0ilUpJr169iFAoJL6+vqSwsJBT5ubNm2T27NlEJBIRMzMzIpFISEhICOdUb1va05bp6ek61woLC4lcLifm5ubEycmJpKSk6L2/a9euJQMHDiQmJibEwcGBEKL/VGpHxq3PpUuXCADO/zVCnv0fmTt3LrGwsCAikYiEhYWRuLi4dk+fWltbkwkTJpAzZ87otOXv70/mz5/fbp8oiiGEkC6aj6nXSFlZGWQyGXbv3o3Q0NCu7g71f+Ktt95CQEAAVq9e/craUKlU6NevH06cOIF33nnnlbVDvR7opEh1SmRkJEaMGAE7OzvcuXMHGzZsQENDA3777Te9h3MoSp+srCwsWbIE5eXlOidRX5Z169bh9OnTOHny5Cupn3q90GeKVKc0NTVh5cqVUCqV6NWrF3x8fBAXF0cnROqFBAQEoKysDPfv38ebb775StoQCoXYsmXLK6mbev3QlSJFURRFtaJfyaAoiqKoVnRSpCiKoqhWdFKkKIqiqFZ0UqQoiqKoVnRSpCiKoqhWdFKkKIqiqFZ0UqQoiqKoVnRSpCiKoqhW/wPkIo2PnzzY1wAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "variations_table_plot = {k:variations_table[k] for k in variations_table if k in ['time stretching', 'pitch shifting', 'noise']}\n", "fig = benchmark.plot_variations(variations_table_plot, show_diff = True, figsize=(4.6, 4.2));\n", From ebd993074de7268ad0088fb56ae44e250b331a8b Mon Sep 17 00:00:00 2001 From: emanuele-moscato Date: Thu, 21 Mar 2024 15:30:36 +0100 Subject: [PATCH 17/21] Update notebooks --- examples/speech/audio_from_hf.ipynb | 167 +++++++++++++------------- examples/speech/getting_started.ipynb | 1 + 2 files changed, 87 insertions(+), 81 deletions(-) diff --git a/examples/speech/audio_from_hf.ipynb b/examples/speech/audio_from_hf.ipynb index 171a7dd..4d6bca0 100644 --- a/examples/speech/audio_from_hf.ipynb +++ b/examples/speech/audio_from_hf.ipynb @@ -26,9 +26,9 @@ "name": "stderr", "output_type": "stream", "text": [ - "/home/giuseppe/miniconda3/envs/ferret_0.5.0/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + "/home/moscato/miniconda3/envs/ferret-testing/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", " from .autonotebook import tqdm as notebook_tqdm\n", - "/home/giuseppe/miniconda3/envs/ferret_0.5.0/lib/python3.9/site-packages/pyannote/audio/core/io.py:43: UserWarning: torchaudio._backend.set_audio_backend has been deprecated. With dispatcher enabled, this function is no-op. You can remove the function call.\n", + "/home/moscato/miniconda3/envs/ferret-testing/lib/python3.10/site-packages/pyannote/audio/core/io.py:43: UserWarning: torchaudio._backend.set_audio_backend has been deprecated. With dispatcher enabled, this function is no-op. 
You can remove the function call.\n", " torchaudio.set_audio_backend(\"soundfile\")\n", "torchvision is not available - cannot save figures\n" ] @@ -66,12 +66,13 @@ "name": "stdout", "output_type": "stream", "text": [ - "cuda:2\n" + "cuda:0\n" ] } ], "source": [ - "device = 'cuda:2' if torch.cuda.is_available() else 'cpu'\n", + "# Note: set the ordinal of the device according to your system.\n", + "device = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n", "print(device)" ] }, @@ -219,7 +220,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.1. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../.cache/torch/whisperx-vad-segmentation.bin`\n" + "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.1. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../../.cache/torch/whisperx-vad-segmentation.bin`\n" ] }, { @@ -278,12 +279,12 @@ "name": "stdout", "output_type": "stream", "text": [ - "ExplanationSpeech(features=['Increase', 'the', 'temperature', 'in', 'the', 'washroom.'], scores=array([[ 0.47325948, -0.45515063, -0.10200211, -0.15734437, -0.12148061,\n", + "ExplanationSpeech(features=['Increase', 'the', 'temperature', 'in', 'the', 'washroom.'], scores=array([[ 0.47325889, -0.45515126, -0.10200202, -0.15734476, -0.1214807 ,\n", " 0.0109534 ],\n", - " [ 0.07733697, -0.02064097, 0.34651279, -0.01588559, -0.01463729,\n", + " [ 0.07733703, -0.02064097, 0.34651214, -0.01588559, -0.01463729,\n", " -0.02365428],\n", " [-0.01432282, -0.01848161, -0.00988954, -0.00070852, -0.01123005,\n", - " 0.32860303]]), explainer='loo_speech+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 
'start': 1.141, 'end': 1.526, 'score': 0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" + " 0.32860351]]), explainer='loo_speech+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" ] } ], @@ -307,12 +308,12 @@ "name": "stdout", "output_type": "stream", "text": [ - "ExplanationSpeech(features=['Increase', 'the', 'temperature', 'in', 'the', 'washroom.'], scores=array([[ 2.73476301e-01, -2.75996308e-02, 2.68968859e-02,\n", - " 4.38230033e-02, -9.83693653e-03, 3.43606501e-02],\n", - " [-4.55664511e-02, 2.00727565e-04, 3.07805104e-01,\n", - " -7.30904579e-03, 8.18154319e-03, 1.45066594e-01],\n", - " [ 7.67946057e-02, -1.63121582e-02, 1.69544374e-01,\n", - " 1.03233484e-02, 6.95427995e-02, 4.02942428e-01]]), explainer='LIME+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" + "ExplanationSpeech(features=['Increase', 'the', 'temperature', 'in', 'the', 'washroom.'], scores=array([[ 2.73476344e-01, -2.75996750e-02, 2.68968727e-02,\n", + " 4.38229965e-02, -9.83699882e-03, 3.43606337e-02],\n", + " [-4.55664854e-02, 2.00781865e-04, 
3.07805077e-01,\n", + " -7.30899444e-03, 8.18159192e-03, 1.45066601e-01],\n", + " [ 7.67945654e-02, -1.63121489e-02, 1.69544356e-01,\n", + " 1.03233346e-02, 6.95427875e-02, 4.02942513e-01]]), explainer='LIME+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" ] } ], @@ -350,7 +351,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.1. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../.cache/torch/whisperx-vad-segmentation.bin`\n" + "Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v2.2.1. To apply the upgrade to your files permanently, run `python -m pytorch_lightning.utilities.upgrade_checkpoint ../../../../.cache/torch/whisperx-vad-segmentation.bin`\n" ] }, { @@ -360,12 +361,12 @@ "Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\n", "Model was trained with torch 1.10.0+cu102, yours is 2.2.1+cu121. 
Bad things might happen unless you revert torch to 1.x.\n", "Transcribed audio with whisperX into: Increase the temperature in the washroom.\n", - "ExplanationSpeech(features=['Increase', 'the', 'temperature', 'in', 'the', 'washroom.'], scores=array([[ 0.30518979, -0.05905298, 0.02406042, 0.06312685, -0.01027066,\n", - " 0.00634839],\n", - " [-0.00192933, 0.04791304, 0.30365684, 0.01351917, -0.02577572,\n", - " 0.13388124],\n", - " [ 0.07868745, -0.02967894, 0.21510287, 0.02970933, 0.03952176,\n", - " 0.44306288]]), explainer='LIME+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" + "ExplanationSpeech(features=['Increase', 'the', 'temperature', 'in', 'the', 'washroom.'], scores=array([[ 0.30518981, -0.05905296, 0.02406044, 0.06312683, -0.01027067,\n", + " 0.00634836],\n", + " [-0.00192932, 0.04791306, 0.30365684, 0.01351914, -0.02577573,\n", + " 0.13388129],\n", + " [ 0.0786875 , -0.029679 , 0.21510288, 0.02970933, 0.03952171,\n", + " 0.44306297]]), explainer='LIME+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" ] } ], @@ -387,8 +388,8 @@ { "data": { "text/plain": [ - "(EvaluationSpeech(name='aopc_compr_speech', score=[0.3684091712348163, 0.24896600097417831, 
0.5148161690682173], target=[3, 4, 3]),\n", - " EvaluationSpeech(name='aopc_suff', score=[0.01417614333331585, -0.004319131374359131, -0.01769007444381714], target=[3, 4, 3]))" + "(EvaluationSpeech(name='aopc_compr_speech', score=[0.3684087162837386, 0.24896559864282608, 0.5148161184042692], target=[3, 4, 3]),\n", + " EvaluationSpeech(name='aopc_suff', score=[0.014175561256706715, -0.004319146275520325, -0.01769007444381714], target=[3, 4, 3]))" ] }, "execution_count": 13, @@ -422,123 +423,127 @@ "name": "stderr", "output_type": "stream", "text": [ - "Perturbation type: 100%|████████████████████████████████████████████████████████████████████████████████████████████| 8/8 [01:29<00:00, 11.25s/it]\n" + "Perturbation type: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 8/8 [00:57<00:00, 7.21s/it]\n" ] }, { "data": { "text/html": [ "\n", - "\n", + "
\n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", "
 pitch shiftingpitch shifting downpitch shifting uptime stretchingtime stretching downtime stretching upreverberationnoisepitch shiftingpitch shifting downpitch shifting uptime stretchingtime stretching downtime stretching upreverberationnoise
action=increase0.310.240.410.330.330.330.190.27action=increase0.310.240.410.330.330.330.250.27
object=heat0.250.190.33-0.02-0.02-0.02-0.020.23object=heat0.250.190.33-0.02-0.02-0.020.020.23
location=washroom0.020.020.02-0.02-0.02-0.020.000.70location=washroom0.020.020.02-0.02-0.02-0.02-0.010.70
\n" ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -563,7 +568,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 15, "metadata": {}, "outputs": [], "source": [ @@ -577,7 +582,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 16, "metadata": {}, "outputs": [ { @@ -619,7 +624,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.0" + "version": "3.10.13" } }, "nbformat": 4, diff --git a/examples/speech/getting_started.ipynb b/examples/speech/getting_started.ipynb index 19b27ca..ae42e4e 100644 --- a/examples/speech/getting_started.ipynb +++ b/examples/speech/getting_started.ipynb @@ -101,6 +101,7 @@ } ], "source": [ + "# Note: set the ordinal of the device according to your system.\n", "device_str = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n", "device = torch.device(device_str)\n", "\n", From 4ff1c2ba6f06274f9b7bb6ef66b80770232679d0 Mon Sep 17 00:00:00 2001 From: emanuele-moscato Date: Wed, 27 Mar 2024 16:33:48 +0100 Subject: [PATCH 18/21] Iron out minor details (use normalized arrays in all the speech explainers), commit re-evaluated notebooks --- examples/speech/audio_from_hf.ipynb | 122 ++++---- examples/speech/getting_started.ipynb | 280 +++++++++--------- .../gradient_equal_width_explainer.py | 8 +- .../equal_width/lime_equal_width_explainer.py | 9 +- .../equal_width/loo_equal_width_explainer.py | 6 +- .../gradient_speech_explainer.py | 5 +- .../lime_speech_explainer.py | 13 +- .../loo_speech_explainer.py | 10 +- .../paraling_speech_explainer.py | 26 +- .../explanation_speech/utils_removal.py | 2 - ferret/speechxai_utils.py | 14 - pyproject.toml | 2 +- 12 files changed, 251 insertions(+), 246 deletions(-) diff --git a/examples/speech/audio_from_hf.ipynb b/examples/speech/audio_from_hf.ipynb index 4d6bca0..67da144 100644 --- a/examples/speech/audio_from_hf.ipynb +++ b/examples/speech/audio_from_hf.ipynb @@ -26,9 +26,9 @@ "name": "stderr", 
"output_type": "stream", "text": [ - "/home/moscato/miniconda3/envs/ferret-testing/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + "/home/moscato/miniconda3/envs/ferret-testing-2/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", " from .autonotebook import tqdm as notebook_tqdm\n", - "/home/moscato/miniconda3/envs/ferret-testing/lib/python3.10/site-packages/pyannote/audio/core/io.py:43: UserWarning: torchaudio._backend.set_audio_backend has been deprecated. With dispatcher enabled, this function is no-op. You can remove the function call.\n", + "/home/moscato/miniconda3/envs/ferret-testing-2/lib/python3.10/site-packages/pyannote/audio/core/io.py:43: UserWarning: torchaudio._backend.set_audio_backend has been deprecated. With dispatcher enabled, this function is no-op. 
You can remove the function call.\n", " torchaudio.set_audio_backend(\"soundfile\")\n", "torchvision is not available - cannot save figures\n" ] @@ -284,7 +284,7 @@ " [ 0.07733703, -0.02064097, 0.34651214, -0.01588559, -0.01463729,\n", " -0.02365428],\n", " [-0.01432282, -0.01848161, -0.00988954, -0.00070852, -0.01123005,\n", - " 0.32860351]]), explainer='loo_speech+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" + " 0.32860351]]), explainer='loo_speech+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" ] } ], @@ -313,7 +313,7 @@ " [-4.55664854e-02, 2.00781865e-04, 3.07805077e-01,\n", " -7.30899444e-03, 8.18159192e-03, 1.45066601e-01],\n", " [ 7.67945654e-02, -1.63121489e-02, 1.69544356e-01,\n", - " 1.03233346e-02, 6.95427875e-02, 4.02942513e-01]]), explainer='LIME+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 
'end': 2.132, 'score': 0.588}])\n" + " 1.03233346e-02, 6.95427875e-02, 4.02942513e-01]]), explainer='LIME+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" ] } ], @@ -366,7 +366,7 @@ " [-0.00192932, 0.04791306, 0.30365684, 0.01351914, -0.02577573,\n", " 0.13388129],\n", " [ 0.0786875 , -0.029679 , 0.21510288, 0.02970933, 0.03952171,\n", - " 0.44306297]]), explainer='LIME+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" + " 0.44306297]]), explainer='LIME+silence', target=[3, 4, 3], audio=, word_timestamps=[{'word': 'Increase', 'start': 0.737, 'end': 1.02, 'score': 0.438}, {'word': 'the', 'start': 1.04, 'end': 1.121, 'score': 0.141}, {'word': 'temperature', 'start': 1.141, 'end': 1.526, 'score': 0.444}, {'word': 'in', 'start': 1.546, 'end': 1.627, 'score': 0.848}, {'word': 'the', 'start': 1.647, 'end': 1.728, 'score': 0.953}, {'word': 'washroom.', 'start': 1.768, 'end': 2.132, 'score': 0.588}])\n" ] } ], @@ -423,127 +423,127 @@ "name": "stderr", "output_type": "stream", "text": [ - "Perturbation type: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 8/8 [00:57<00:00, 
7.21s/it]\n" + "Perturbation type: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 8/8 [01:15<00:00, 9.44s/it]\n" ] }, { "data": { "text/html": [ "\n", - "\n", + "
\n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", "
 pitch shiftingpitch shifting downpitch shifting uptime stretchingtime stretching downtime stretching upreverberationnoisepitch shiftingpitch shifting downpitch shifting uptime stretchingtime stretching downtime stretching upreverberationnoise
action=increase0.310.240.410.330.330.330.250.27action=increase0.310.240.410.330.330.330.240.27
object=heat0.250.190.33-0.02-0.02-0.020.020.23object=heat0.250.190.33-0.02-0.02-0.02-0.000.23
location=washroom0.020.020.02-0.02-0.02-0.02-0.010.70location=washroom0.020.020.02-0.02-0.02-0.02-0.010.70
\n" ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -624,7 +624,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.13" + "version": "3.10.14" } }, "nbformat": 4, diff --git a/examples/speech/getting_started.ipynb b/examples/speech/getting_started.ipynb index ae42e4e..ddedf50 100644 --- a/examples/speech/getting_started.ipynb +++ b/examples/speech/getting_started.ipynb @@ -26,7 +26,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "/home/moscato/miniconda3/envs/ferret-testing/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + "/home/moscato/miniconda3/envs/ferret-testing-2/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", " from .autonotebook import tqdm as notebook_tqdm\n", "torchvision is not available - cannot save figures\n" ] @@ -171,7 +171,7 @@ " " ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -238,96 +238,96 @@ "data": { "text/html": [ "\n", - "\n", + "
\n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", "
 Turnupthebedroomheat.Turnupthebedroomheat.
action=increase0.2510.5450.2430.1300.021action=increase0.2510.5450.2430.1300.021
object=heat-0.000-0.000-0.0000.0140.412object=heat-0.000-0.000-0.0000.0140.412
location=bedroom0.0020.0060.0820.9970.242location=bedroom0.0020.0060.0820.9970.242
\n" ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -375,100 +375,100 @@ "data": { "text/html": [ "\n", - "\n", + "
\n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", "
 Turnupthebedroomheat.Turnupthebedroomheat.
action=increase0.1550.2730.1170.2810.149action=increase0.1550.2730.1170.2810.149
object=heat0.0550.0150.065-0.0070.211object=heat0.0550.0150.065-0.0070.211
location=bedroom-0.065-0.0050.2530.7070.036location=bedroom-0.065-0.0050.2530.7070.036
\n" ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -520,150 +520,150 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 11, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "Perturbation type: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 8/8 [00:28<00:00, 3.61s/it]\n" + "Perturbation type: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 8/8 [00:39<00:00, 4.98s/it]\n" ] }, { "data": { "text/html": [ "\n", - "\n", + "
\n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", "
 pitch shiftingpitch shifting downpitch shifting uptime stretchingtime stretching downtime stretching upreverberationnoisepitch shiftingpitch shifting downpitch shifting uptime stretchingtime stretching downtime stretching upreverberationnoise
action=increase0.080.040.130.110.190.040.630.44action=increase0.080.040.130.110.190.040.610.44
object=heat0.02-0.000.040.000.000.000.000.29object=heat0.02-0.000.040.000.000.000.000.29
location=bedroom0.220.130.330.020.030.010.340.60location=bedroom0.220.130.330.020.030.010.390.60
\n" ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -687,7 +687,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 12, "metadata": {}, "outputs": [], "source": [ @@ -701,7 +701,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 13, "metadata": {}, "outputs": [ { @@ -743,7 +743,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.13" + "version": "3.10.14" } }, "nbformat": 4, diff --git a/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py b/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py index 604f872..84b2ea4 100644 --- a/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py +++ b/ferret/explainers/explanation_speech/equal_width/gradient_equal_width_explainer.py @@ -76,7 +76,7 @@ def compute_explanation( "Aggregation method not supported, choose between 'mean' and 'max'" ) - audio_np = audio.array + audio_np = audio.normalized_array # Predict logits/probabilities logits_original = self.model_helper.predict([audio_np]) @@ -113,9 +113,9 @@ def compute_explanation( importances = [] a, b = 0, 0 # 50, 20 - duration_s = len(audio_np) / audio.sample_rate - # no need to use the duration on the pydub version since - # the pydub audio segment is not even used here + # Note: assuming mono audio here ([duration in s] = [n samples] / + # [sample rate]). 
+ duration_s = len(audio_np) / audio.sample_rate a, b = 0, 0 for e, i in enumerate(np.arange(0, duration_s, num_s_split)): diff --git a/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py b/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py index 6fd377a..e281ad8 100644 --- a/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py +++ b/ferret/explainers/explanation_speech/equal_width/lime_equal_width_explainer.py @@ -34,7 +34,7 @@ def compute_explanation( "Removal method not supported, choose between 'silence' and 'noise'" ) - audio_np = audio.array + audio_np = audio.normalized_array # Predict logits/probabilities logits_original = self.model_helper.predict([audio_np]) @@ -56,8 +56,6 @@ def compute_explanation( else: targets = [int(np.argmax(logits_original, axis=1)[0])] - # GG: removed the reshaping since it is already done in FerretAudio - # Get the start and end indexes of the segments. These will be used to split the audio and derive LIME interpretable features sampling_rate = self.model_helper.feature_extractor.sampling_rate splits = [] @@ -89,7 +87,10 @@ def compute_explanation( predict_proba_function = self.model_helper.predict from copy import deepcopy - input_audio = deepcopy(audio_np) + # WARNING: this is the original reshaping, which assumes that + # `LimeTimeSeriesExplainer` accepts an array with shape + # (1, n_samples). 
+ input_audio = deepcopy(audio_np.reshape(1, -1)) # Explain the instance using the splits as interpretable features exp = lime_explainer.explain_instance( diff --git a/ferret/explainers/explanation_speech/equal_width/loo_equal_width_explainer.py b/ferret/explainers/explanation_speech/equal_width/loo_equal_width_explainer.py index f59aa7a..031cb41 100644 --- a/ferret/explainers/explanation_speech/equal_width/loo_equal_width_explainer.py +++ b/ferret/explainers/explanation_speech/equal_width/loo_equal_width_explainer.py @@ -45,6 +45,8 @@ def remove_audio_segment(audio, start_s, end_s, removal_type: str = "silence"): audio_removed = before_word_audio + replace_word_audio + after_word_audio return audio_removed + + class LOOSpeechEqualWidthExplainer: NAME = "loo_speech_equal_width" @@ -65,7 +67,7 @@ def compute_explanation( ## Load audio as pydub.AudioSegment audio_as = audio.to_pydub() - audio_np = audio.array + audio_np = audio.normalized_array ## Remove word audio_remove_segments = [] @@ -77,6 +79,8 @@ def compute_explanation( end_s = min(i + num_s_split, duration_s) audio_removed = remove_audio_segment(audio_as, start_s, end_s, removal_type) + # Using `pydub_to_np` to avoid converting to a `FerretAudio` + # instance with no real need for it. audio_remove_segments.append(pydub_to_np(audio_removed)[0]) if display_audio: diff --git a/ferret/explainers/explanation_speech/gradient_speech_explainer.py b/ferret/explainers/explanation_speech/gradient_speech_explainer.py index 74e57a4..e8ff2b7 100644 --- a/ferret/explainers/explanation_speech/gradient_speech_explainer.py +++ b/ferret/explainers/explanation_speech/gradient_speech_explainer.py @@ -79,7 +79,10 @@ def compute_explanation( ) # Load audio and convert to np.array - audio_array = audio.array + # Note: we use the normalized array for consistency with the original + # SpeechXAI code (it used to come from the `pydub_to_np` + # function). 
+ audio_array = audio.normalized_array # Predict logits/probabilities logits_original = self.model_helper.predict([audio_array]) diff --git a/ferret/explainers/explanation_speech/lime_speech_explainer.py b/ferret/explainers/explanation_speech/lime_speech_explainer.py index 8dd7416..8ed37f4 100644 --- a/ferret/explainers/explanation_speech/lime_speech_explainer.py +++ b/ferret/explainers/explanation_speech/lime_speech_explainer.py @@ -35,7 +35,11 @@ def compute_explanation( "Removal method not supported, choose between 'silence' and 'noise'" ) - audio_array = audio.array + # Note: we use the normalized array for consistency with the original + # SpeechXAI code (it used to come from the `pydub_to_np` + # function). + audio_array = audio.normalized_array + # Predict logits/probabilities logits_original = self.model_helper.predict([audio_array]) @@ -56,11 +60,6 @@ def compute_explanation( else: targets = [int(np.argmax(logits_original, axis=1)[0])] - # if word_timestamps is None: - # Transcribe audio - # word_timestamps = audio.transcription - audio_np = audio_array.reshape(1, -1) - # Get the start and end indexes of the words. 
These will be used to split the audio and derive LIME interpretable features tot_len = audio_array.shape[0] sampling_rate = self.model_helper.feature_extractor.sampling_rate @@ -91,7 +90,7 @@ def compute_explanation( predict_proba_function = self.model_helper.predict from copy import deepcopy - input_audio = deepcopy(audio_np) + input_audio = deepcopy(audio_array.reshape(1, -1)) # Explain the instance using the splits as interpretable features exp = lime_explainer.explain_instance( diff --git a/ferret/explainers/explanation_speech/loo_speech_explainer.py b/ferret/explainers/explanation_speech/loo_speech_explainer.py index ae26100..5588d8e 100644 --- a/ferret/explainers/explanation_speech/loo_speech_explainer.py +++ b/ferret/explainers/explanation_speech/loo_speech_explainer.py @@ -41,6 +41,9 @@ def remove_words( for word in word_timestamps: audio_removed = remove_word(pydub_segment, word, removal_type) + + # Note: we might potentially put `audio_removed` into a + # `FerretAudio` object, but it'd be an additional step. audio_no_words.append(pydub_to_np(audio_removed)[0]) if display_audio: @@ -67,9 +70,10 @@ def compute_explanation( logits_modified = self.model_helper.predict(modified_audios) - # GA: we don't need this conversion as we already have the numpy audio array in FerretAudio - # audio = pydub_to_np(AudioSegment.from_wav(audio_path))[0] - audio_array = audio.array + # Note: we use the normalized array for consistency with the original + # SpeechXAI code (it used to come from the `pydub_to_np` + # function). 
+ audio_array = audio.normalized_array logits_original = self.model_helper.predict([audio_array]) diff --git a/ferret/explainers/explanation_speech/paraling_speech_explainer.py b/ferret/explainers/explanation_speech/paraling_speech_explainer.py index b8fdc85..e0160b7 100644 --- a/ferret/explainers/explanation_speech/paraling_speech_explainer.py +++ b/ferret/explainers/explanation_speech/paraling_speech_explainer.py @@ -238,7 +238,7 @@ def perturbe_waveform( return_perturbations=False, verbose: bool = False, verbose_target: int = 0, - ): # -> List[np.ndarray]: + ): """ Perturbate audio using pydub, by adding: - pitch shifting @@ -331,7 +331,7 @@ def perturbe_waveform( ) pydub_segment = audio.to_pydub() - audio_array = audio.array + for perturbation_value in perturbations: if "time stretching" in perturbation_type: if USE_AUDIOSTRETCH: @@ -346,15 +346,20 @@ def perturbe_waveform( # perturbated_audio = self.pitch_shifting_augmentation( # audio_as, perturbation_value # ) + + + # Note: here we assume frame rate and sample rate are the + # same, which is always true for single-channel (mono) + # audio. 
perturbated_audio = self.change_pitch_torchaudio( - audio.array, + audio.normalized_array, audio.current_sr, - perturbation_value, # TODO: Assuming frame rate == sampling rate + perturbation_value, ) elif perturbation_type == "noise" and USE_ADD_NOISE_TORCHAUDIO: perturbated_audio = self.add_white_noise_torchaudio( - audio.array, perturbation_value + audio.normalized_array, perturbation_value ) else: augment = self.augmentation( @@ -362,7 +367,8 @@ def perturbe_waveform( perturbation_type=perturbation_type, ) perturbated_audio = augment( - samples=audio.array.squeeze(), sample_rate=audio.current_sr + samples=audio.normalized_array.squeeze(), + sample_rate=audio.current_sr ) if verbose: @@ -403,7 +409,10 @@ def compute_explanation( logits_modified = self.model_helper.predict(modified_audios) - logits_original = self.model_helper.predict([audio.array]) + # Note: we use the normalized array for consistency with the original + # SpeechXAI code (it used to come from the `pydub_to_np` + # function). 
+ logits_original = self.model_helper.predict([audio.normalized_array]) # Check if single label or multilabel scenario as for FSC n_labels = self.model_helper.n_labels @@ -466,7 +475,8 @@ def explain_variations( ): n_labels = self.model_helper.n_labels - audio_array = audio.array + audio_array = audio.normalized_array + original_gt = self.model_helper.get_predicted_probs(audio=audio_array) if target_class is None: diff --git a/ferret/explainers/explanation_speech/utils_removal.py b/ferret/explainers/explanation_speech/utils_removal.py index 6329d16..20ed538 100644 --- a/ferret/explainers/explanation_speech/utils_removal.py +++ b/ferret/explainers/explanation_speech/utils_removal.py @@ -69,8 +69,6 @@ def remove_word(audio, word, removal_type: str = "nothing"): after_word_audio = audio[word["end"] * 1000 + b :] word_duration = (word["end"] * 1000 - word["start"] * 1000) + a + b - # TODO GA: we don't really to use pydub here, we can use numpy directly - if removal_type == "nothing": replace_word_audio = AudioSegment.empty() elif removal_type == "silence": diff --git a/ferret/speechxai_utils.py b/ferret/speechxai_utils.py index d9a4175..827dc30 100644 --- a/ferret/speechxai_utils.py +++ b/ferret/speechxai_utils.py @@ -60,20 +60,6 @@ def _is_normalized(self) -> bool: def normalized_array(self) -> np.ndarray: return self.array / 32768.0 if not self._is_normalized else self.array - # @property - # def transcription(self): - # if self._transcription is None: - # if ( - # ): - # _, self._transcription = transcribe_audio( - # audio=self.normalized_array, # is normalization needed when transcribing? i am assumimg so - # device="cuda", - # batch_size=2, - # compute_type="float32", - # ) - # else: - # return self._transcription - def resample(self, target_sr: int): """ Resample the audio to the target sampling rate. In place operation. 
diff --git a/pyproject.toml b/pyproject.toml index e4f3c54..d899a41 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ audio-effects = { version = "0.22", optional = true } # so the dependency needs to be installed from the GitHub repo, which in turns # prevents it from being used among the extras in pyproject.toml. Until a # working version of WhisperX is released, the users are required to install -# it from the repo manually. +# it from the repo manually with: `pip install git+https://github.com/m-bain/whisperx.git` # whisperx = { version = "3.1.2", optional = true } [tool.poetry.extras] From 282fc6318283a6686d131b7177ab9557326e028a Mon Sep 17 00:00:00 2001 From: emanuele-moscato Date: Wed, 27 Mar 2024 16:48:38 +0100 Subject: [PATCH 19/21] Fix log printing --- .../explanation_speech/paraling_speech_explainer.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ferret/explainers/explanation_speech/paraling_speech_explainer.py b/ferret/explainers/explanation_speech/paraling_speech_explainer.py index e0160b7..8469af6 100644 --- a/ferret/explainers/explanation_speech/paraling_speech_explainer.py +++ b/ferret/explainers/explanation_speech/paraling_speech_explainer.py @@ -550,7 +550,7 @@ def _tmp_log_show_info( # Note that in a single label scenario, verbose_target is ignored (always 0) - print_log(perturbation_type, perturbation_value) + print(perturbation_type, perturbation_value) # Prediction probability predictions = self.model_helper.predict([perturbated_audio]) @@ -561,22 +561,22 @@ def _tmp_log_show_info( preds = self.model_helper.get_text_labels(predicted_labels) if self.model_helper.n_labels > 1: - print_log(f"Target label: {verbose_target}") - print_log( + print(f"Target label: {verbose_target}") + print( f"Predicted probs:", np.round(predictions[verbose_target], 3), ) - print_log( + print( "Predicted class: ", preds[verbose_target], f"id: {predicted_labels[verbose_target]}", ) else: - print_log( + print( 
f"Predicted probs: ", np.round(predictions[0], 3), ) - print_log( + print( "Predicted class: ", preds, f"id: {predicted_labels[0]}", From 38386c03efcf6c8d96ea39942fd965e5c8b16432 Mon Sep 17 00:00:00 2001 From: emanuele-moscato Date: Wed, 27 Mar 2024 17:02:30 +0100 Subject: [PATCH 20/21] Fix dependency in github workflow --- .github/workflows/flake8-pytest.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/flake8-pytest.yml b/.github/workflows/flake8-pytest.yml index 36734e8..0dd7bb0 100644 --- a/.github/workflows/flake8-pytest.yml +++ b/.github/workflows/flake8-pytest.yml @@ -30,6 +30,7 @@ jobs: python -m pip install flake8 pytest poetry # poetry install --all-extras pip install -e .[all] + pip install git+https://github.com/m-bain/whisperx.git - name: Lint with flake8 run: | # stop the build if there are Python syntax errors or undefined names From 2fef1e79ff5e0112ea1f18a5f354c160381ae092 Mon Sep 17 00:00:00 2001 From: emanuele-moscato Date: Wed, 27 Mar 2024 17:14:23 +0100 Subject: [PATCH 21/21] Fix dependency --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index d899a41..8709b80 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -43,6 +43,7 @@ lime = "^0.2.0.1" joblib = "^1.3.2" pytreebank = "^0.2.7" thermostat-datasets = "^1.1.0" +ipython = "^8.22.2" # Speech-XAI additional requirements to allow for `pip install ferret[speech]`. pydub = { version = "0.25.1", optional = true } audiomentations = { version = "0.34.1", optional = true }