From 6b4e24bac99e221f94ec0def3f510fb779a5abc6 Mon Sep 17 00:00:00 2001
From: Ella Charlaix <80481427+echarlaix@users.noreply.github.com>
Date: Wed, 13 Mar 2024 12:43:20 +0100
Subject: [PATCH 1/8] Improve documentation (#601)

* Improve documentation
* small fix
* remove
* Update docs/source/optimization_ov.mdx
Co-authored-by: Alexander Kozlov
* Update docs/source/inference.mdx
Co-authored-by: Helena Kloosterman
* fix typo
* update paragraph
---------
Co-authored-by: Alexander Kozlov
Co-authored-by: Helena Kloosterman
---
 docs/source/inference.mdx       |  11 +--
 docs/source/optimization_ov.mdx | 141 +++++++++++++++++++-------------
 2 files changed, 92 insertions(+), 60 deletions(-)

diff --git a/docs/source/inference.mdx b/docs/source/inference.mdx
index 65480c1d2f..e0b60baa2e 100644
--- a/docs/source/inference.mdx
+++ b/docs/source/inference.mdx
@@ -99,21 +99,22 @@ tokenizer.save_pretrained(save_directory)

 ### Weight-only quantization

-You can also apply 8-bit or 4-bit weight quantization when exporting your model with the CLI by setting the `weight-format` argument to respectively `int8` or `int4`:
+You can also apply fp16, 8-bit or 4-bit weight compression on the Linear, Convolutional and Embedding layers when exporting your model with the CLI by setting `--weight-format` to `fp16`, `int8` or `int4` respectively:

 ```bash
 optimum-cli export openvino --model gpt2 --weight-format int8 ov_model
 ```

-This will result in the exported model linear and embedding layers to be quantized to INT8 or INT4, the activations will be kept in floating point precision. This type of optimization allows reducing the footprint and latency of LLMs.
+This type of optimization reduces the memory footprint and inference latency.

-By default the quantization scheme will be [assymmetric](https://github.com/openvinotoolkit/nncf/blob/develop/docs/compression_algorithms/Quantization.md#asymmetric-quantization), to make it [symmetric](https://github.com/openvinotoolkit/nncf/blob/develop/docs/compression_algorithms/Quantization.md#symmetric-quantization) you can add `--sym`.
+
+By default the quantization scheme will be [asymmetric](https://github.com/openvinotoolkit/nncf/blob/develop/docs/compression_algorithms/Quantization.md#asymmetric-quantization); to make it [symmetric](https://github.com/openvinotoolkit/nncf/blob/develop/docs/compression_algorithms/Quantization.md#symmetric-quantization) you can add `--sym`.

 For INT4 quantization you can also specify the following arguments:
 * The `--group-size` parameter defines the group size to use for quantization; setting it to `-1` results in per-column quantization.
 * The `--ratio` parameter controls the ratio between 4-bit and 8-bit quantization. If set to 0.9, it means that 90% of the layers will be quantized to `int4` while 10% will be quantized to `int8`.

-Smaller `group_size` and `ratio` of usually improve accuracy at the sacrifice of the model size and inference latency.
+Smaller `group_size` and `ratio` values usually improve accuracy at the cost of model size and inference latency.

 You can also apply 8-bit quantization on your model's weights when loading your model by setting the `load_in_8bit=True` argument when calling the `from_pretrained()` method.

@@ -125,7 +126,7 @@ model = OVModelForCausalLM.from_pretrained(model_id, load_in_8bit=True)

-`load_in_8bit` is enabled by default for the models larger than 1 billion parameters.
+`load_in_8bit` is enabled by default for models larger than 1 billion parameters. You can disable it with `load_in_8bit=False`.
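+
+Once exported, the model can be loaded back like any other OpenVINO model. The snippet below is a minimal sketch reusing the `ov_model` directory produced by the CLI example above (the prompt is illustrative):
+
+```python
+from transformers import AutoTokenizer
+from optimum.intel import OVModelForCausalLM
+
+model = OVModelForCausalLM.from_pretrained("ov_model")
+tokenizer = AutoTokenizer.from_pretrained("ov_model")
+
+inputs = tokenizer("The weather today is", return_tensors="pt")
+outputs = model.generate(**inputs, max_new_tokens=20)
+print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
+```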
diff --git a/docs/source/optimization_ov.mdx b/docs/source/optimization_ov.mdx
index 70c98f14f7..1e78c36805 100644
--- a/docs/source/optimization_ov.mdx
+++ b/docs/source/optimization_ov.mdx
@@ -19,15 +19,72 @@ limitations under the License.

 🤗 Optimum Intel provides an `openvino` package that enables you to apply a variety of model compression methods, such as quantization and pruning, on many models hosted on the 🤗 hub using the [NNCF](https://docs.openvino.ai/2022.1/docs_nncf_introduction.html) framework.

-## Post-training optimization
+## Post-training

-Post-training static quantization introduces an additional calibration step where data is fed through the network in order to compute the activations quantization parameters.
-Here is how to apply static quantization on a fine-tuned DistilBERT:
+Quantization is a technique to reduce the computational and memory costs of running inference by representing the weights and/or the activations with lower precision data types like 8-bit or 4-bit.
+
+### Weight-only quantization
+
+Quantization can be applied to the model's Linear, Convolutional and Embedding layers, enabling the loading of large models on memory-limited devices. For example, when applying 8-bit quantization, the resulting model will be x4 smaller than its fp32 counterpart. For 4-bit quantization, the reduction in memory could theoretically reach x8, but is closer to x6 in practice.
+
+
+#### 8-bit
+
+For 8-bit weight quantization, you can set `load_in_8bit=True` to load your model's weights in 8-bit:

 ```python
-from functools import partial
-from transformers import AutoTokenizer
-from optimum.intel import OVConfig, OVQuantizer, OVModelForSequenceClassification,
+from optimum.intel import OVModelForCausalLM
+
+model_id = "helenai/gpt2-ov"
+model = OVModelForCausalLM.from_pretrained(model_id, load_in_8bit=True)
+
+# Saves the int8 model that will be x4 smaller than its fp32 counterpart
+model.save_pretrained(saving_directory)
+```
+
+
+`load_in_8bit` is enabled by default for models larger than 1 billion parameters. You can disable it with `load_in_8bit=False`.
+
+
+You can also provide a `quantization_config` instead to specify additional optimization parameters.
+
+#### 4-bit
+
+For 4-bit weight quantization, you need a `quantization_config` to define the optimization parameters, for example:
+
+```python
+from optimum.intel import OVModelForCausalLM, OVWeightQuantizationConfig
+
+quantization_config = OVWeightQuantizationConfig(bits=4)
+model = OVModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config)
+```
+
+You can tune quantization parameters to achieve a better performance-accuracy trade-off as follows:
+
+```python
+quantization_config = OVWeightQuantizationConfig(bits=4, sym=False, ratio=0.8, dataset="ptb")
+```
+
+By default the quantization scheme will be [asymmetric](https://github.com/openvinotoolkit/nncf/blob/develop/docs/compression_algorithms/Quantization.md#asymmetric-quantization); to make it [symmetric](https://github.com/openvinotoolkit/nncf/blob/develop/docs/compression_algorithms/Quantization.md#symmetric-quantization) you can add `sym=True`.
+
+For 4-bit quantization you can also specify the following arguments in the quantization configuration:
+* The `group_size` parameter defines the group size to use for quantization; setting it to `-1` results in per-column quantization.
+* The `ratio` parameter controls the ratio between 4-bit and 8-bit quantization. If set to 0.9, it means that 90% of the layers will be quantized to `int4` while 10% will be quantized to `int8`.
+
+Smaller `group_size` and `ratio` values usually improve accuracy at the cost of model size and inference latency.
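+
+For instance, a configuration combining these options could look as follows (a sketch; the values are illustrative and should be tuned for each model):
+
+```python
+from optimum.intel import OVModelForCausalLM, OVWeightQuantizationConfig
+
+# group-wise symmetric 4-bit quantization, keeping 20% of the layers in 8-bit
+quantization_config = OVWeightQuantizationConfig(bits=4, sym=True, group_size=128, ratio=0.8)
+model = OVModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config)
+```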
+
+### Static quantization
+
+When applying post-training static quantization, both the weights and the activations are quantized.
+To apply quantization on the activations, an additional calibration step is needed, which consists of feeding a `calibration_dataset` through the network in order to estimate the activation quantization parameters.
+
+Here is how to apply static quantization on a fine-tuned DistilBERT given your own `calibration_dataset`:
+
+```python
+from transformers import AutoTokenizer
+from optimum.intel import OVQuantizer, OVModelForSequenceClassification

 model_id = "distilbert-base-uncased-finetuned-sst-2-english"
 model = OVModelForSequenceClassification.from_pretrained(model_id, export=True)
@@ -35,11 +92,22 @@ tokenizer = AutoTokenizer.from_pretrained(model_id)
 # The directory where the quantized model will be saved
 save_dir = "ptq_model"

+quantizer = OVQuantizer.from_pretrained(model)
+
+# Apply static quantization and export the resulting quantized model to OpenVINO IR format
+quantizer.quantize(calibration_dataset=calibration_dataset, save_directory=save_dir)
+# Save the tokenizer
+tokenizer.save_pretrained(save_dir)
+```
+
+The calibration dataset can also be created easily using the `OVQuantizer`:
+
+```python
+from functools import partial
+
 def preprocess_function(examples, tokenizer):
     return tokenizer(examples["sentence"], padding="max_length", max_length=128, truncation=True)

-# Instantiate our OVQuantizer using the desired configuration
-quantizer = OVQuantizer.from_pretrained(model)
 # Create the calibration dataset used to perform static quantization
 calibration_dataset = quantizer.get_calibration_dataset(
     "glue",
@@ -48,33 +116,23 @@ calibration_dataset = quantizer.get_calibration_dataset(
     num_samples=300,
     dataset_split="train",
 )
-# Apply static quantization and export the resulting quantized model to OpenVINO IR format
-quantizer.quantize(
-    calibration_dataset=calibration_dataset,
-    save_directory=save_dir,
-)
-# Save the tokenizer
-tokenizer.save_pretrained(save_dir)
 ```

-The `quantize()` method applies post-training static quantization and export the resulting quantized model to the OpenVINO Intermediate Representation (IR). The resulting graph is represented with two files: an XML file describing the network topology and a binary file describing the weights. The resulting model can be run on any target Intel device.

-## Weight-only quantization
+The `quantize()` method applies post-training static quantization and exports the resulting quantized model to the OpenVINO Intermediate Representation (IR). The resulting graph is represented with two files: an XML file describing the network topology and a binary file describing the weights. The resulting model can be run on any target Intel device.
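+
+The statically quantized model saved in `save_dir` can then be reloaded and used as a drop-in replacement, for example through a `pipeline` (a minimal sketch):
+
+```python
+from transformers import pipeline
+
+model = OVModelForSequenceClassification.from_pretrained(save_dir)
+tokenizer = AutoTokenizer.from_pretrained(save_dir)
+cls_pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
+print(cls_pipe("He's a dreadful magician."))
+```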
-You can optimize the performance of text-generation LLMs by quantizing weights to various precisions that provide different performance-accuracy trade-offs.

-```python
-from optimum.intel import OVModelForCausalLM
+### Hybrid quantization

-model = OVModelForCausalLM.from_pretrained(model_id, load_in_8bit=True)
-```
+Traditional optimization methods like post-training 8-bit quantization do not work well for Stable Diffusion (SD) models and can lead to poor generation results. On the other hand, weight compression does not improve performance significantly when applied to Stable Diffusion models, as the size of activations is comparable to weights.
+The U-Net component takes up most of the overall execution time of the pipeline. Thus, optimizing just this one component can bring substantial benefits in terms of inference speed while keeping acceptable accuracy without fine-tuning. Quantizing the rest of the diffusion pipeline does not significantly improve inference performance but could potentially lead to substantial accuracy degradation.
+Therefore, the proposal is to apply quantization in *hybrid mode* for the U-Net model and weight-only quantization for the rest of the pipeline components:
+* U-Net: quantization applied to both the weights and activations
+* The text encoder, VAE encoder / decoder: quantization applied to the weights
-## Hybrid quantization
+
+The hybrid mode involves the quantization of weights in MatMul and Embedding layers, and activations of other layers, facilitating accuracy preservation post-optimization while reducing the model size.

-Traditional optimization methods like post-training 8-bit quantization do not work well for Stable Diffusion models and can lead to poor generation results. On the other hand, weight compression does not improve performance significantly when applied to Stable Diffusion models, as the size of activations is comparable to weights.
-The UNet model takes up most of the overall execution time of the pipeline. Thus, optimizing just one model brings substantial benefits in terms of inference speed while keeping acceptable accuracy without fine-tuning. Quantizing the rest of the diffusion pipeline does not significantly improve inference performance but could potentially lead to substantial degradation of accuracy.
-Therefore, the proposal is to apply quantization in *hybrid mode* for the UNet model and weight-only quantization for the rest of the pipeline components. The hybrid mode involves the quantization of weights in MatMul and Embedding layers, and activations of other layers, facilitating accuracy preservation post-optimization while reducing the model size.
-The `quantization_config` is utilized to define optimization parameters for optimizing the Stable Diffusion pipeline. To enable hybrid quantization, specify the quantization dataset in the `quantization_config`. Otherwise, weight-only quantization to a specified data type (8 tr 4 bits) is applied to UNet model.
+The `quantization_config` is used to define the optimization parameters for the SD pipeline. To enable hybrid quantization, specify the quantization dataset in the `quantization_config`. If the dataset is not defined, weight-only quantization will be applied to all components.

 ```python
 from optimum.intel import OVStableDiffusionPipeline, OVWeightQuantizationConfig

 model = OVStableDiffusionPipeline.from_pretrained(
     model_id,
 )
 ```
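+
+The resulting pipeline can then be called like any other diffusers pipeline (the prompt below is illustrative):
+
+```python
+image = model(prompt="sailing ship in storm by Rembrandt").images[0]
+image.save("result.png")
+```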
-
-
-`load_in_8bit` is enabled by default for the models larger than 1 billion parameters.
-
-
-
-For the 4-bit weight quantization you can use the `quantization_config` to specify the optimization parameters, for example:
-
-```python
-from optimum.intel import OVModelForCausalLM, OVWeightQuantizationConfig
-
-model = OVModelForCausalLM.from_pretrained(
-    model_id,
-    quantization_config=OVWeightQuantizationConfig(bits=4),
-)
-```
-
-You can tune quantization parameters to achieve a better performance accuracy trade-off as follows:
-
-```python
-from optimum.intel import OVModelForCausalLM, OVWeightQuantizationConfig
-
-model = OVModelForCausalLM.from_pretrained(
-    model_id,
-    quantization_config=OVWeightQuantizationConfig(bits=4, sym=False, ratio=0.8, dataset="ptb"),
-)
-```

 For more details, please refer to the corresponding NNCF [documentation](https://github.com/openvinotoolkit/nncf/blob/develop/docs/compression_algorithms/CompressWeights.md).

-## Training-time optimization
+## Training-time

 Apart from optimizing a model after training like post-training quantization above, `optimum.openvino` also provides optimization methods during training, namely Quantization-Aware Training (QAT) and Joint Pruning, Quantization and Distillation (JPQD).

From 2588077a9f57d829b6d2344b3d8b643397137132 Mon Sep 17 00:00:00 2001
From: Ekaterina Aidova
Date: Wed, 13 Mar 2024 15:44:06 +0400
Subject: [PATCH 2/8] Add commit id into dev version setup (#597)

* add commit id into dev version setup
* do not update version if can not get commit
---
 setup.py | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/setup.py b/setup.py
index ac4056c30d..3a1e1123d0 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,6 @@
+import os
 import re
+import subprocess

 from setuptools import find_namespace_packages, setup

@@ -8,6 +10,19 @@
 filepath = "optimum/intel/version.py"
 with open(filepath) as version_file:
     (__version__,) = re.findall('__version__ = "(.*)"', version_file.read())
+    if __version__.endswith(".dev0"):
+        dev_version_id = ""
+        try:
+            repo_root = os.path.dirname(os.path.realpath(__file__))
+            dev_version_id = (
+                subprocess.check_output(["git", "rev-parse", "--short", "HEAD"], cwd=repo_root)  # nosec
+                .strip()
+                .decode()
+            )
+            dev_version_id = "+" + dev_version_id
+        except subprocess.CalledProcessError:
+            pass
+        __version__ = __version__ + dev_version_id
 except Exception as error:
     assert False, "Error: Could not open '%s' due %s\n" % (filepath, error)

From 8880d2e77c56cb158c96a264cc7f784b1e72189a Mon Sep 17 00:00:00 2001
From: Ekaterina Aidova
Date: Thu, 14 Mar 2024 17:15:14 +0400
Subject: [PATCH 3/8] Add openvino export configs (#568)

* add openvino export configs
* more libs
* more libs
* mixtral and model patcher
* chatglm export
* rework chatglm config
* more testing models
* rework config registration
* add chatglm in tests
* Update tests/openvino/test_modeling.py
* fix style
* gemma
* add test models
* qwen
* fix failed tests
* add comment for gemma
---
 optimum/exporters/openvino/__init__.py      |   2 +
 optimum/exporters/openvino/__main__.py      |  18 +-
 optimum/exporters/openvino/convert.py       |  49 +--
 optimum/exporters/openvino/model_configs.py | 391 +++++++++++++++++
 optimum/exporters/openvino/model_patcher.py | 441 +++++++++++++++++++-
 optimum/intel/openvino/modeling_decoder.py  |   4 +-
 optimum/intel/openvino/quantization.py      |   2 +-
 setup.py                                    |   4 +-
 tests/openvino/test_modeling.py             |  72 +++-
 tests/openvino/utils_tests.py               |   8 +
 10 files changed, 933 insertions(+), 58 deletions(-)
 create mode 100644 optimum/exporters/openvino/model_configs.py
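Note: with these export configs registered under the "openvino" exporter, the architectures patched below (e.g. chatglm, qwen, baichuan) can be exported through the usual API. A hypothetical sketch (the checkpoint name is illustrative; such models require `trust_remote_code=True`):

```python
from optimum.intel import OVModelForCausalLM

model = OVModelForCausalLM.from_pretrained("THUDM/chatglm2-6b", export=True, trust_remote_code=True)
```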
diff --git a/optimum/exporters/openvino/__init__.py b/optimum/exporters/openvino/__init__.py
index 9664f6ae6d..94ea4f103b 100644
--- a/optimum/exporters/openvino/__init__.py
+++ b/optimum/exporters/openvino/__init__.py
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import optimum.exporters.openvino.model_configs
+
 from .__main__ import main_export
 from .convert import export, export_from_model, export_models, export_pytorch_via_onnx
 from .stateful import ensure_stateful_is_available, patch_stateful

diff --git a/optimum/exporters/openvino/__main__.py b/optimum/exporters/openvino/__main__.py
index 1c695e2f19..02268a3604 100644
--- a/optimum/exporters/openvino/__main__.py
+++ b/optimum/exporters/openvino/__main__.py
@@ -58,7 +58,7 @@ def main_export(
     local_files_only: bool = False,
     use_auth_token: Optional[Union[bool, str]] = None,
     model_kwargs: Optional[Dict[str, Any]] = None,
-    custom_onnx_configs: Optional[Dict[str, "OnnxConfig"]] = None,
+    custom_export_configs: Optional[Dict[str, "OnnxConfig"]] = None,
     fn_get_submodels: Optional[Callable] = None,
     compression_option: Optional[str] = None,
     compression_ratio: Optional[float] = None,
@@ -112,11 +112,11 @@ def main_export(
             when running `transformers-cli login` (stored in `~/.huggingface`).
         model_kwargs (`Optional[Dict[str, Any]]`, defaults to `None`):
             Experimental usage: keyword arguments to pass to the model during
-            the export. This argument should be used along the `custom_onnx_configs` argument
+            the export. This argument should be used along with the `custom_export_configs` argument
             in case, for example, the model inputs/outputs are changed (for example, if
             `model_kwargs={"output_attentions": True}` is passed).
-        custom_onnx_configs (`Optional[Dict[str, OnnxConfig]]`, defaults to `None`):
-            Experimental usage: override the default ONNX config used for the given model. This argument may be useful for advanced users that desire a finer-grained control on the export. An example is available [here](https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model).
+        custom_export_configs (`Optional[Dict[str, OnnxConfig]]`, defaults to `None`):
+            Experimental usage: override the default export config used for the given model. This argument may be useful for advanced users that desire a finer-grained control on the export. An example is available [here](https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model).
         fn_get_submodels (`Optional[Callable]`, defaults to `None`):
             Experimental usage: Override the default submodels that are used at the export. This is especially useful when exporting a custom architecture that needs to split the ONNX (e.g. encoder-decoder). If unspecified with custom models, optimum will try to use the default submodels used for the given task, with no guarantee of success.
@@ -134,7 +134,7 @@ def main_export( ```python >>> from optimum.exporters.openvino import main_export - >>> main_export("gpt2", output="gpt2_onnx/") + >>> main_export("gpt2", output="gpt2_ov/") ``` """ @@ -206,14 +206,14 @@ def main_export( if model_type not in TasksManager._SUPPORTED_MODEL_TYPE: custom_architecture = True elif task not in TasksManager.get_supported_tasks_for_model_type( - model_type, exporter="onnx", library_name=library_name + model_type, exporter="openvino", library_name=library_name ): if original_task == "auto": autodetected_message = " (auto-detected)" else: autodetected_message = "" model_tasks = TasksManager.get_supported_tasks_for_model_type( - model_type, exporter="onnx", library_name=library_name + model_type, exporter="openvino", library_name=library_name ) raise ValueError( f"Asked to export a {model_type} model for the task {task}{autodetected_message}, but the Optimum OpenVINO exporter only supports the tasks {', '.join(model_tasks.keys())} for {model_type}. Please use a supported task. Please open an issue at https://github.com/huggingface/optimum/issues if you would like the task {task} to be supported in the ONNX export for {model_type}." @@ -288,7 +288,7 @@ class StoreAttr(object): not custom_architecture and library_name != "diffusers" and task + "-with-past" - in TasksManager.get_supported_tasks_for_model_type(model_type, exporter="onnx", library_name=library_name) + in TasksManager.get_supported_tasks_for_model_type(model_type, exporter="openvino", library_name=library_name) ): # Make -with-past the default if --task was not explicitely specified if original_task == "auto": @@ -319,7 +319,7 @@ class StoreAttr(object): ov_config=ov_config, stateful=stateful, model_kwargs=model_kwargs, - custom_onnx_configs=custom_onnx_configs, + custom_export_configs=custom_export_configs, fn_get_submodels=fn_get_submodels, preprocessors=preprocessors, device=device, diff --git a/optimum/exporters/openvino/convert.py b/optimum/exporters/openvino/convert.py index 5353912d48..dfca80f001 100644 --- a/optimum/exporters/openvino/convert.py +++ b/optimum/exporters/openvino/convert.py @@ -32,10 +32,11 @@ from optimum.exporters.onnx.convert import check_dummy_inputs_are_allowed from optimum.exporters.onnx.convert import export_pytorch as export_pytorch_to_onnx from optimum.exporters.onnx.convert import export_tensorflow as export_tensorflow_onnx +from optimum.exporters.utils import _get_submodels_and_export_configs from optimum.utils import DEFAULT_DUMMY_SHAPES, is_diffusers_available from optimum.utils.save_utils import maybe_save_preprocessors -from ...intel.utils.import_utils import is_nncf_available, is_optimum_version +from ...intel.utils.import_utils import is_nncf_available from .model_patcher import patch_model_with_bettertransformer from .stateful import ensure_export_task_support_stateful, ensure_stateful_is_available, patch_stateful from .utils import ( @@ -48,13 +49,6 @@ ) -if is_optimum_version(">=", "1.16.99"): - from optimum.exporters.onnx.utils import _get_submodels_and_onnx_configs - -else: - from optimum.exporters.onnx.__main__ import _get_submodels_and_onnx_configs - - UNSUPPORTED_TOKENIZER_CLASSES = (T5Tokenizer, T5TokenizerFast) @@ -418,7 +412,7 @@ def ts_patched_forward(*args, **kwargs): def export_models( - models_and_onnx_configs: Dict[ + models_and_export_configs: Dict[ str, Tuple[Union["PreTrainedModel", "TFPreTrainedModel", "ModelMixin"], "OnnxConfig"] ], output_dir: Path, @@ -434,7 +428,7 @@ def export_models( Export the models to OpenVINO IR format 
Args: - models_and_onnx_configs (Dict[ str, Tuple[Union["PreTrainedModel", "TFPreTrainedModel", "ModelMixin"], "OnnxConfig"]): + models_and_export_configs (Dict[ str, Tuple[Union["PreTrainedModel", "TFPreTrainedModel", "ModelMixin"], "OnnxConfig"]): output_dir (Path): output directory for saving models opset (Optional[int], optional, Default to None): ONNX export opset output_names (Optional[List[str]], optional, Defaults to None): model output names @@ -459,20 +453,20 @@ def export_models( outputs = [] - if output_names is not None and len(output_names) != len(models_and_onnx_configs): + if output_names is not None and len(output_names) != len(models_and_export_configs): raise ValueError( - f"Provided custom names {output_names} for the export of {len(models_and_onnx_configs)} models. Please provide the same number of names as models to export." + f"Provided custom names {output_names} for the export of {len(models_and_export_configs)} models. Please provide the same number of names as models to export." ) - for i, model_name in enumerate(models_and_onnx_configs.keys()): - submodel, sub_onnx_config = models_and_onnx_configs[model_name] + for i, model_name in enumerate(models_and_export_configs.keys()): + submodel, sub_export_config = models_and_export_configs[model_name] output_name = output_names[i] if output_names is not None else Path(model_name + ".xml") output_path = output_dir / output_name output_path.parent.mkdir(parents=True, exist_ok=True) outputs.append( export( model=submodel, - config=sub_onnx_config, + config=sub_export_config, output=output_path, opset=opset, device=device, @@ -495,7 +489,7 @@ def export_from_model( stateful: bool = True, opset: Optional[int] = None, model_kwargs: Optional[Dict[str, Any]] = None, - custom_onnx_configs: Optional[Dict[str, "OnnxConfig"]] = None, + custom_export_configs: Optional[Dict[str, "OnnxConfig"]] = None, fn_get_submodels: Optional[Callable] = None, preprocessors: List = None, device: str = "cpu", @@ -524,14 +518,14 @@ def export_from_model( task = TasksManager._infer_task_from_model_or_model_class(model=model) except (ValueError, KeyError) as e: raise RuntimeError( - f"The model task could not be automatically inferred in `onnx_export_from_model`. Please provide the argument `task` with the relevant task from {', '.join(TasksManager.get_all_tasks())}. Detailed error: {e}" + f"The model task could not be automatically inferred in `export_from_model`. Please provide the argument `task` with the relevant task from {', '.join(TasksManager.get_all_tasks())}. Detailed error: {e}" ) if ( not custom_architecture and library_name != "diffusers" and task + "-with-past" - in TasksManager.get_supported_tasks_for_model_type(model_type, "onnx", library_name=library_name) + in TasksManager.get_supported_tasks_for_model_type(model_type, "openvino", library_name=library_name) ): # -with-past is the default. task = task + "-with-past" @@ -541,9 +535,9 @@ def export_from_model( stateful = stateful and ensure_export_task_support_stateful(task) # TODO: support onnx_config.py in the model repo - if custom_architecture and custom_onnx_configs is None: + if custom_architecture and custom_export_configs is None: raise ValueError( - f"Trying to export a {model_type} model, that is a custom or unsupported architecture, but no custom onnx configuration was passed as `custom_onnx_configs`. 
Please refer to https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model#custom-export-of-transformers-models for an example on how to export custom models. Please open an issue at https://github.com/huggingface/optimum/issues if you would like the model type {model_type} to be supported natively in the ONNX export." + f"Trying to export a {model_type} model, that is a custom or unsupported architecture, but no custom export configuration was passed as `custom_export_configs`. Please refer to https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model#custom-export-of-transformers-models for an example on how to export custom models. Please open an issue at https://github.com/huggingface/optimum/issues if you would like the model type {model_type} to be supported natively in the ONNX export." ) if task.startswith("text-generation") and model.config.is_encoder_decoder: @@ -569,11 +563,11 @@ def export_from_model( kwargs_shapes[input_name] if input_name in kwargs_shapes else DEFAULT_DUMMY_SHAPES[input_name] ) - onnx_config, models_and_onnx_configs = _get_submodels_and_onnx_configs( + export_config, models_and_export_configs = _get_submodels_and_export_configs( model=model, task=task, monolith=False, - custom_onnx_configs=custom_onnx_configs if custom_onnx_configs is not None else {}, + custom_export_configs=custom_export_configs if custom_export_configs is not None else {}, custom_architecture=custom_architecture, fn_get_submodels=fn_get_submodels, preprocessors=preprocessors, @@ -581,6 +575,7 @@ def export_from_model( model_kwargs=model_kwargs, _variant="default", legacy=False, + exporter="openvino", ) if ov_config is None: @@ -612,18 +607,18 @@ def export_from_model( model_name_or_path = model.config._name_or_path maybe_save_preprocessors(model_name_or_path, output) - files_subpaths = ["openvino_" + model_name + ".xml" for model_name in models_and_onnx_configs.keys()] + files_subpaths = ["openvino_" + model_name + ".xml" for model_name in models_and_export_configs.keys()] else: # save the subcomponent configuration - for model_name in models_and_onnx_configs: - subcomponent = models_and_onnx_configs[model_name][0] + for model_name in models_and_export_configs: + subcomponent = models_and_export_configs[model_name][0] if hasattr(subcomponent, "save_config"): subcomponent.save_config(output / model_name) elif hasattr(subcomponent, "config") and hasattr(subcomponent.config, "save_pretrained"): subcomponent.config.save_pretrained(output / model_name) - files_subpaths = [os.path.join(name_dir, OV_XML_FILE_NAME) for name_dir in models_and_onnx_configs] + files_subpaths = [os.path.join(name_dir, OV_XML_FILE_NAME) for name_dir in models_and_export_configs] # Saving the additional components needed to perform inference. model.scheduler.save_pretrained(output.joinpath("scheduler")) @@ -643,7 +638,7 @@ def export_from_model( model.save_config(output) export_models( - models_and_onnx_configs=models_and_onnx_configs, + models_and_export_configs=models_and_export_configs, output_dir=output, output_names=files_subpaths, input_shapes=input_shapes, diff --git a/optimum/exporters/openvino/model_configs.py b/optimum/exporters/openvino/model_configs.py new file mode 100644 index 0000000000..b6536512b1 --- /dev/null +++ b/optimum/exporters/openvino/model_configs.py @@ -0,0 +1,391 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from copy import deepcopy +from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union + +from packaging import version +from transformers.utils import is_tf_available + +from optimum.exporters.onnx.config import TextDecoderOnnxConfig, TextDecoderWithPositionIdsOnnxConfig +from optimum.exporters.onnx.model_configs import GemmaOnnxConfig +from optimum.exporters.tasks import TasksManager +from optimum.utils import DEFAULT_DUMMY_SHAPES +from optimum.utils.input_generators import ( + DummyInputGenerator, + DummyPastKeyValuesGenerator, + DummyTextInputGenerator, + MistralDummyPastKeyValuesGenerator, +) +from optimum.utils.normalized_config import NormalizedTextConfig + +from .model_patcher import ( + BaichuanModelPatcher, + ChatGLMModelPatcher, + GemmaModelPatcher, + MixtralModelPatcher, + QwenModelPatcher, +) + + +def init_model_configs(): + supported_model_types = [ + "_SUPPORTED_MODEL_TYPE", + "_DIFFUSERS_SUPPORTED_MODEL_TYPE", + "_TIMM_SUPPORTED_MODEL_TYPE", + "_SENTENCE_TRANSFORMERS_SUPPORTED_MODEL_TYPE", + ] + + for supported_models_config in supported_model_types: + supported_models = getattr(TasksManager, supported_models_config) + for model, export_configs in supported_models.items(): + if "onnx" not in export_configs: + continue + onnx_config = export_configs["onnx"] + supported_models[model]["openvino"] = deepcopy(onnx_config) + + setattr(TasksManager, supported_models_config, supported_models) + + +init_model_configs() + + +if TYPE_CHECKING: + from transformers.modeling_utils import PreTrainedModel + + from optimum.exporters.onnx.model_patcher import ModelPatcher + + if is_tf_available(): + from transformers.modeling_tf_utils import TFPreTrainedModel + + +register_in_tasks_manager = TasksManager.create_register("openvino", overwrite_existing=True) + + +@register_in_tasks_manager("baichuan", *["text-generation", "text-generation-with-past"], library_name="transformers") +class BaichaunOpenVINOConfig(TextDecoderOnnxConfig): + DEFAULT_ONNX_OPSET = 13 + NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args( + num_layers="num_hidden_layers", num_attention_heads="num_attention_heads", hidden_size="hidden_size" + ) + + def patch_model_for_export( + self, model: Union["PreTrainedModel", "TFPreTrainedModel"], model_kwargs: Optional[Dict[str, Any]] = None + ) -> "ModelPatcher": + return BaichuanModelPatcher(self, model, model_kwargs=model_kwargs) + + +@register_in_tasks_manager("qwen2", *["text-generation", "text-generation-with-past"], library_name="transformers") +class Qwen2OpenVINOConfig(TextDecoderWithPositionIdsOnnxConfig): + DEFAULT_ONNX_OPSET = 14 + + DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator, MistralDummyPastKeyValuesGenerator) + DUMMY_PKV_GENERATOR_CLASS = MistralDummyPastKeyValuesGenerator + NORMALIZED_CONFIG_CLASS = NormalizedTextConfig + + +@register_in_tasks_manager("minicpm", *["text-generation", "text-generation-with-past"], library_name="transformers") +class 
MiniCPMOpenVINOConfig(TextDecoderWithPositionIdsOnnxConfig): + DEFAULT_ONNX_OPSET = 14 + + DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator, MistralDummyPastKeyValuesGenerator) + DUMMY_PKV_GENERATOR_CLASS = MistralDummyPastKeyValuesGenerator + NORMALIZED_CONFIG_CLASS = NormalizedTextConfig + + +@register_in_tasks_manager("stablelm", *["text-generation", "text-generation-with-past"], library_name="transformers") +class StableLMOpenVINOConfig(TextDecoderWithPositionIdsOnnxConfig): + DEFAULT_ONNX_OPSET = 14 + + DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator, MistralDummyPastKeyValuesGenerator) + DUMMY_PKV_GENERATOR_CLASS = MistralDummyPastKeyValuesGenerator + NORMALIZED_CONFIG_CLASS = NormalizedTextConfig + + +class ChatGLM2DummyPastKeyValuesGenerator(DummyPastKeyValuesGenerator): + def __init__( + self, + task: str, + normalized_config: NormalizedTextConfig, + batch_size: int = DEFAULT_DUMMY_SHAPES["batch_size"], + sequence_length: int = DEFAULT_DUMMY_SHAPES["sequence_length"], + random_batch_size_range: Optional[Tuple[int, int]] = None, + random_sequence_length_range: Optional[Tuple[int, int]] = None, + **kwargs, + ): + super().__init__( + task=task, + normalized_config=normalized_config, + batch_size=batch_size, + sequence_length=sequence_length, + random_batch_size_range=random_batch_size_range, + random_sequence_length_range=random_sequence_length_range, + ) + self.multi_query_group_num = normalized_config.multi_query_group_num + self.head_dim = normalized_config.kv_channels + + def generate(self, input_name: str, framework: str = "pt", int_dtype: str = "int64", float_dtype: str = "fp32"): + past_key_shape = ( + self.sequence_length, + self.batch_size, + self.multi_query_group_num, + self.head_dim, + ) + past_value_shape = ( + self.sequence_length, + self.batch_size, + self.multi_query_group_num, + self.head_dim, + ) + return [ + ( + self.random_float_tensor(past_key_shape, framework=framework, dtype=float_dtype), + self.random_float_tensor(past_value_shape, framework=framework, dtype=float_dtype), + ) + for _ in range(self.num_layers) + ] + + +@register_in_tasks_manager("chatglm", *["text-generation", "text-generation-with-past"], library_name="transformers") +class ChatGLM2OpenVINOConfig(TextDecoderWithPositionIdsOnnxConfig): + NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(vocab_size="padded_vocab_size", num_layers="num_layers") + DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator, ChatGLM2DummyPastKeyValuesGenerator) + DUMMY_PKV_GENERATOR_CLASS = ChatGLM2DummyPastKeyValuesGenerator + + def generate_dummy_inputs(self, framework: str = "pt", **kwargs): + dummy_inputs_generators = self._create_dummy_input_generator_classes(**kwargs) + + dummy_inputs = {} + input_names = [key for key in self.inputs.keys() if not key.startswith("past_key_values")] + if self.use_past_in_inputs and self.use_cache_branch is not False: + input_names.append("past_key_values") + + for input_name in input_names: + input_was_inserted = False + for dummy_input_gen in dummy_inputs_generators: + if dummy_input_gen.supports_input(input_name): + dummy_inputs[input_name] = self.overwrite_shape_and_generate_input( + dummy_input_gen, + input_name, + framework, + input_shapes=kwargs, + ) + input_was_inserted = True + break + if not input_was_inserted: + raise RuntimeError( + f'Could not generate dummy input for "{input_name}". Try adding a proper dummy input generator to the model ONNX config.' 
+                )
+
+        # refer to https://github.com/huggingface/optimum/pull/764
+        if (
+            self.use_past_in_inputs
+            and self.PAD_ATTENTION_MASK_TO_PAST
+            and self.use_cache_branch is not False
+            and "attention_mask" in dummy_inputs
+        ):
+            # Obtain the past sequence length from the value instead of the key (Bloom). ChatGLM has seq_len in 0 dim instead of -2
+            past_present_length = dummy_inputs["input_ids"].shape[1] + dummy_inputs["past_key_values"][0][1].shape[0]
+
+            dummy_inputs["attention_mask"] = DummyInputGenerator.pad_input_on_dim(
+                dummy_inputs["attention_mask"],
+                desired_length=past_present_length,
+                dim=1,
+                dtype=dummy_inputs["attention_mask"].dtype,
+            )
+
+        return dummy_inputs
+
+    def add_past_key_values(self, inputs_or_outputs: Dict[str, Dict[int, str]], direction: str):
+        """
+        Fills `input_or_outputs` mapping with past_key_values dynamic axes considering the direction.
+
+        Args:
+            inputs_or_outputs (`Dict[str, Dict[int, str]]`): The mapping to fill.
+            direction (`str`):
+                either "inputs" or "outputs", it specifies whether `input_or_outputs` is the input mapping or the
+                output mapping, this is important for axes naming.
+        """
+        if direction not in ["inputs", "outputs"]:
+            raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')
+
+        if direction == "inputs":
+            decoder_sequence_name = "past_sequence_length"
+            name = "past_key_values"
+        else:
+            decoder_sequence_name = "past_sequence_length + present_length"
+            name = "present"
+
+        for i in range(self._normalized_config.num_layers):
+            inputs_or_outputs[f"{name}.{i}.key"] = {1: "batch_size", 0: decoder_sequence_name}
+            inputs_or_outputs[f"{name}.{i}.value"] = {1: "batch_size", 0: decoder_sequence_name}
+
+    def patch_model_for_export(
+        self, model: Union["PreTrainedModel", "TFPreTrainedModel"], model_kwargs: Optional[Dict[str, Any]] = None
+    ) -> "ModelPatcher":
+        return ChatGLMModelPatcher(self, model, model_kwargs=model_kwargs)
+
+
+@register_in_tasks_manager("mixtral", *["text-generation", "text-generation-with-past"], library_name="transformers")
+class MixtralOpenVINOConfig(TextDecoderWithPositionIdsOnnxConfig):
+    # This is because of the patching of torch.triu in AttentionMaskConverter, that exists from transformers>=4.35
+    MIN_TRANSFORMERS_VERSION = version.parse("4.34.99")
+
+    # The ONNX export of this architecture needs the Trilu operator support, available since opset 14
+    DEFAULT_ONNX_OPSET = 14
+    DUMMY_INPUT_GENERATOR_CLASSES = (
+        MistralDummyPastKeyValuesGenerator,
+    ) + TextDecoderOnnxConfig.DUMMY_INPUT_GENERATOR_CLASSES
+    DUMMY_PKV_GENERATOR_CLASS = MistralDummyPastKeyValuesGenerator
+    NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(num_key_value_heads="num_key_value_heads", allow_new=True)
+
+    def patch_model_for_export(
+        self, model: Union["PreTrainedModel", "TFPreTrainedModel"], model_kwargs: Optional[Dict[str, Any]] = None
+    ) -> "ModelPatcher":
+        return MixtralModelPatcher(self, model, model_kwargs=model_kwargs)
+
+
+@register_in_tasks_manager(
+    "gemma",
+    *[
+        "feature-extraction",
+        "feature-extraction-with-past",
+        "text-generation",
+        "text-generation-with-past",
+        "text-classification",
+    ],
+    library_name="transformers",
+)
+class GemmaOpenVINOConfig(GemmaOnnxConfig):
+    def patch_model_for_export(
+        self, model: Union["PreTrainedModel", "TFPreTrainedModel"], model_kwargs: Optional[Dict[str, Any]] = None
+    ) -> "ModelPatcher":
+        return GemmaModelPatcher(self, model, model_kwargs=model_kwargs)
+
+
+class QwenDummyPastKeyValuesGenerator(DummyPastKeyValuesGenerator):
+    def 
__init__( + self, + task: str, + normalized_config: NormalizedTextConfig, + batch_size: int = DEFAULT_DUMMY_SHAPES["batch_size"], + sequence_length: int = DEFAULT_DUMMY_SHAPES["sequence_length"], + random_batch_size_range: Optional[Tuple[int, int]] = None, + random_sequence_length_range: Optional[Tuple[int, int]] = None, + **kwargs, + ): + super().__init__( + task=task, + normalized_config=normalized_config, + batch_size=batch_size, + sequence_length=sequence_length, + random_batch_size_range=random_batch_size_range, + random_sequence_length_range=random_sequence_length_range, + ) + self.kv_channels = normalized_config.kv_channels + + def generate(self, input_name: str, framework: str = "pt", int_dtype: str = "int64", float_dtype: str = "fp32"): + past_key_shape = (self.batch_size, self.sequence_length, self.num_attention_heads, self.kv_channels) + past_value_shape = (self.batch_size, self.sequence_length, self.num_attention_heads, self.kv_channels) + return [ + ( + self.random_float_tensor(past_key_shape, framework=framework, dtype=float_dtype), + self.random_float_tensor(past_value_shape, framework=framework, dtype=float_dtype), + ) + for _ in range(self.num_layers) + ] + + +@register_in_tasks_manager("qwen", *["text-generation", "text-generation-with-past"]) +class QwenOpenVINOConfig(TextDecoderWithPositionIdsOnnxConfig): + DEFAULT_ONNX_OPSET = 14 + NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args( + num_layers="num_hidden_layers", num_attention_heads="num_attention_heads", hidden_size="hidden_size" + ) + DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator, QwenDummyPastKeyValuesGenerator) + DUMMY_PKV_GENERATOR_CLASS = QwenDummyPastKeyValuesGenerator + no_position_ids = False + + def generate_dummy_inputs(self, framework: str = "pt", **kwargs): + dummy_inputs_generators = self._create_dummy_input_generator_classes(**kwargs) + + dummy_inputs = {} + input_names = [key for key in self.inputs.keys() if not key.startswith("past_key_values")] + if self.use_past_in_inputs and self.use_cache_branch is not False: + input_names.append("past_key_values") + + for input_name in input_names: + input_was_inserted = False + for dummy_input_gen in dummy_inputs_generators: + if dummy_input_gen.supports_input(input_name): + dummy_inputs[input_name] = self.overwrite_shape_and_generate_input( + dummy_input_gen, + input_name, + framework, + input_shapes=kwargs, + ) + input_was_inserted = True + break + if not input_was_inserted: + raise RuntimeError( + f'Could not generate dummy input for "{input_name}". Try adding a proper dummy input generator to the model ONNX config.' + ) + + # refer to https://github.com/huggingface/optimum/pull/764 + if ( + self.use_past_in_inputs + and self.PAD_ATTENTION_MASK_TO_PAST + and self.use_cache_branch is not False + and "attention_mask" in dummy_inputs + ): + # Obtain the past sequence length from the value instead of the key (Bloom). Qwen has seq_len in 1 dim instead of -2 + past_present_length = dummy_inputs["input_ids"].shape[1] + dummy_inputs["past_key_values"][0][1].shape[1] + + dummy_inputs["attention_mask"] = DummyInputGenerator.pad_input_on_dim( + dummy_inputs["attention_mask"], + desired_length=past_present_length, + dim=1, + dtype=dummy_inputs["attention_mask"].dtype, + ) + + return dummy_inputs + + def add_past_key_values(self, inputs_or_outputs: Dict[str, Dict[int, str]], direction: str): + """ + Fills `input_or_outputs` mapping with past_key_values dynamic axes considering the direction. 
+
+        Args:
+            inputs_or_outputs (`Dict[str, Dict[int, str]]`): The mapping to fill.
+            direction (`str`):
+                either "inputs" or "outputs", it specifies whether `input_or_outputs` is the input mapping or the
+                output mapping, this is important for axes naming.
+        """
+        if direction not in ["inputs", "outputs"]:
+            raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')
+
+        if direction == "inputs":
+            decoder_sequence_name = "past_sequence_length"
+            name = "past_key_values"
+        else:
+            decoder_sequence_name = "past_sequence_length + 1"
+            name = "present"
+
+        for i in range(self._normalized_config.num_layers):
+            inputs_or_outputs[f"{name}.{i}.key"] = {0: "batch_size", 1: decoder_sequence_name}
+            inputs_or_outputs[f"{name}.{i}.value"] = {0: "batch_size", 1: decoder_sequence_name}
+
+    def patch_model_for_export(
+        self, model: Union["PreTrainedModel", "TFPreTrainedModel"], model_kwargs: Optional[Dict[str, Any]] = None
+    ) -> "ModelPatcher":
+        return QwenModelPatcher(self, model, model_kwargs=model_kwargs)

diff --git a/optimum/exporters/openvino/model_patcher.py b/optimum/exporters/openvino/model_patcher.py
index 91dc48df05..371fee732a 100644
--- a/optimum/exporters/openvino/model_patcher.py
+++ b/optimum/exporters/openvino/model_patcher.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,7 +13,15 @@
 # limitations under the License.

 import logging as log
+import types
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

+import torch
+import torch.nn.functional as F
+from transformers.modeling_outputs import BaseModelOutputWithPast
+from transformers.utils import is_tf_available
+
+from optimum.exporters.onnx.model_patcher import DecoderModelPatcher
 from optimum.intel.utils.import_utils import (
     _openvino_version,
     _torch_version,
@@ -24,6 +32,15 @@
 )


+if TYPE_CHECKING:
+    from transformers.modeling_utils import PreTrainedModel
+
+    from optimum.exporters.onnx.config import OnnxConfig
+
+    if is_tf_available():
+        from transformers.modeling_tf_utils import TFPreTrainedModel
+
+
 def patch_model_with_bettertransformer(model):
     COLOR_RED = "\033[1;31m"
     COLOR_RESET = "\033[0m"
@@ -71,3 +88,425 @@ def patch_model_with_bettertransformer(model):
         return model

     return model
+
+
+def _mixtral_sparse_moe_block_forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+    """Forward used by `MixtralModelPatcher` during export; returns the final hidden states and the router logits."""
+    batch_size, sequence_length, hidden_dim = hidden_states.shape
+    hidden_states = hidden_states.view(-1, hidden_dim)
+    # router_logits: (batch * sequence_length, n_experts)
+    router_logits = self.gate(hidden_states)
+
+    routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
+    routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
+    routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
+    # we cast back to the input dtype
+    routing_weights = routing_weights.to(hidden_states.dtype)
+
+    final_hidden_states = torch.zeros(
+        (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
+    )
+
+    # One hot encode the selected experts to create an expert mask
+    # this will be used to easily index which expert is going to be solicited
+    expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
+
+    # Loop over all available experts in 
the model and perform the computation on each expert + for expert_idx in range(self.num_experts): + expert_layer = self.experts[expert_idx] + idx, top_x = torch.where(expert_mask[expert_idx]) + + # Index the correct hidden states and compute the expert hidden state for + # the current expert. We need to make sure to multiply the output hidden + # states by `routing_weights` on the corresponding tokens (top-1 and top-2) + current_state = hidden_states[None, top_x].reshape(-1, hidden_dim) + current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None] + + final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype)) + final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim) + return final_hidden_states, router_logits + + +class MixtralModelPatcher(DecoderModelPatcher): + def __enter__(self): + super().__enter__() + for layer in self._model.model.layers: + layer.block_sparse_moe._unpatched_forward = layer.block_sparse_moe.forward + layer.block_sparse_moe.forward = types.MethodType( + _mixtral_sparse_moe_block_forward, layer.block_sparse_moe + ) + + def __exit__(self, exc_type, exc_value, traceback): + super().__exit__(exc_type, exc_value, traceback) + for layer in self._model.model.layers: + layer.block_sparse_moe.forward = layer.block_sparse_moe._unpatched_forward + + +def _chatglm_transformer_forward( + self, + input_ids, + position_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.BoolTensor] = None, + full_attention_mask: Optional[torch.BoolTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, +): + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + batch_size, seq_length = input_ids.shape + + if inputs_embeds is None: + inputs_embeds = self.embedding(input_ids) + + if self.pre_seq_len is not None: + if past_key_values is None: + past_key_values = self.get_prompt( + batch_size=batch_size, + device=input_ids.device, + dtype=inputs_embeds.dtype, + ) + if attention_mask is not None: + attention_mask = torch.cat( + [ + attention_mask.new_ones((batch_size, self.pre_seq_len)), + attention_mask, + ], + dim=-1, + ) + + if full_attention_mask is None: + if past_key_values is not None: + full_attention_mask = torch.ones( + batch_size, + seq_length, + seq_length, + device=input_ids.device, + dtype=torch.float, + ) * float("-inf") + full_attention_mask.triu_(diagonal=1) + past_length = 0 + if past_key_values: + past_length = past_key_values[0][0].shape[0] + if past_length: + full_attention_mask = torch.cat( + ( + torch.zeros(batch_size, seq_length, past_length, device=input_ids.device), + full_attention_mask, + ), + dim=-1, + ) + full_attention_mask.unsqueeze_(1) + + # Rotary positional embeddings + rotary_pos_emb = self.rotary_pos_emb(self.seq_length) + if position_ids is not None: + rotary_pos_emb = rotary_pos_emb[position_ids] + else: + rotary_pos_emb = rotary_pos_emb[None, :seq_length] + rotary_pos_emb = rotary_pos_emb.transpose(0, 1).contiguous() + + # Run encoder. 
+ hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder( + inputs_embeds, + full_attention_mask, + rotary_pos_emb=rotary_pos_emb, + kv_caches=past_key_values, + use_cache=use_cache, + output_hidden_states=output_hidden_states, + ) + + if not return_dict: + return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) + + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=presents, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + +def _chatglm2_get_context_layer(query_layer: torch.Tensor, key_layer: torch.Tensor, value_layer: torch.Tensor): + mask = torch.zeros((query_layer.shape[-2], key_layer.shape[-2]), dtype=query_layer.dtype) + if query_layer.shape[2] == key_layer.shape[2]: + tmp_mask = torch.ones((query_layer.shape[-2], key_layer.shape[-2]), dtype=torch.bool).triu(diagonal=1) + mask.masked_fill_(tmp_mask, float("-inf")) + + context_layer = torch.nn.functional.scaled_dot_product_attention( + query_layer, key_layer, value_layer, attn_mask=mask + ) + return context_layer + + +def _chatglm2_core_attention_forward(self, query_layer, key_layer, value_layer, attention_mask): + query_layer, key_layer, value_layer = [k.permute(1, 2, 0, 3) for k in [query_layer, key_layer, value_layer]] + if attention_mask is None: + context_layer = _chatglm2_get_context_layer(query_layer, key_layer, value_layer) + else: + context_layer = torch.nn.functional.scaled_dot_product_attention( + query_layer, key_layer, value_layer, attention_mask + ) + context_layer = context_layer.permute(2, 0, 1, 3) + new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,) + context_layer = context_layer.reshape(*new_context_layer_shape) + + return context_layer + + +class ChatGLMModelPatcher(DecoderModelPatcher): + def __init__( + self, + config: "OnnxConfig", + model: Union["PreTrainedModel", "TFPreTrainedModel"], + model_kwargs: Dict[str, Any], + ): + super().__init__(config, model, model_kwargs) + + self.original_chatglm_transformer_forward = model.transformer.forward + + def __enter__(self): + super().__enter__() + self._model.transformer.forward = types.MethodType(_chatglm_transformer_forward, self._model.transformer) + for block in self._model.transformer.encoder.layers: + block.self_attention.core_attention._orig_forward = block.self_attention.core_attention.forward + block.self_attention.core_attention.forward = types.MethodType( + _chatglm2_core_attention_forward, block.self_attention.core_attention + ) + + def __exit__(self, exc_type, exc_value, traceback): + super().__exit__(exc_type, exc_value, traceback) + self._model.transformer.forward = self.original_chatglm_transformer_forward + for block in self._model.transformer.encoder.layers: + block.self_attention.core_attention.forward = block.self_attention.core_attention._orig_forward + + +class GemmaModelPatcher(DecoderModelPatcher): + def __enter__(self): + super().__enter__() + + # init inv_freq for torchscript tracing + # https://github.com/huggingface/transformers/blob/ed74d97871468f3a4695ede50abdc0b55717a84d/src/transformers/models/gemma/modeling_gemma.py#L108 + for layer in self._model.model.layers: + if layer.self_attn.rotary_emb.inv_freq is None: + rotary_emb = layer.self_attn.rotary_emb + layer.self_attn.rotary_emb.inv_freq = 1.0 / ( + rotary_emb.base ** (torch.arange(0, rotary_emb.dim, 2, dtype=torch.int64).float() / rotary_emb.dim) + ) + + +SUPPORT_SDPA = is_torch_version(">", "2.1.0") + + +def 
_qwen_rotate_half(x): + from einops import rearrange + + x = rearrange(x, "... (j d) -> ... j d", j=2) + x1, x2 = x.unbind(dim=-2) + return torch.cat((-x2, x1), dim=-1) + + +def _qwen_apply_rotary_pos_emb(t, freqs): + cos, sin = freqs + rot_dim = freqs[0].shape[-1] + cos, sin = freqs + t_, t_pass_ = t[..., :rot_dim], t[..., rot_dim:] + t_ = t_.float() + t_pass_ = t_pass_.float() + t_ = (t_ * cos) + (_qwen_rotate_half(t_) * sin) + return torch.cat((t_, t_pass_), dim=-1).type_as(t) + + +def _qwen_quantize_cache_v(fdata, bits, qmax, qmin): + # b, s, head, h-dim->b, head, s, h-dim + qtype = torch.uint8 + device = fdata.device + shape = fdata.shape + + fdata_cal = torch.flatten(fdata, 2) + fmax = torch.amax(fdata_cal, dim=-1, keepdim=True) + fmin = torch.amin(fdata_cal, dim=-1, keepdim=True) + # Compute params + if qmax.device != fmax.device: + qmax = qmax.to(device) + qmin = qmin.to(device) + scale = (fmax - fmin) / (qmax - qmin) + zero = qmin - fmin / scale + scale = scale.unsqueeze(-1).repeat(1, 1, shape[2], 1).contiguous() + zero = zero.unsqueeze(-1).repeat(1, 1, shape[2], 1).contiguous() + # Quantize + res_data = fdata / scale + zero + qdata = torch.clamp(res_data, qmin, qmax).to(qtype) + return qdata.contiguous(), scale, zero + + +def _qwen_attention_forward( + self, + hidden_states: Optional[Tuple[torch.FloatTensor]], + rotary_pos_emb_list: Optional[List[List[torch.Tensor]]] = None, + layer_past: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, +): + mixed_x_layer = self.c_attn(hidden_states) + + query, key, value = mixed_x_layer.split(self.split_size, dim=2) + + query = self._split_heads(query, self.num_heads, self.head_dim) + key = self._split_heads(key, self.num_heads, self.head_dim) + value = self._split_heads(value, self.num_heads, self.head_dim) + + if rotary_pos_emb_list is not None: + cur_len = query.shape[1] + if len(rotary_pos_emb_list) == 1: + rotary_pos_emb = rotary_pos_emb_list[0] + rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb] + rotary_pos_emb = (rotary_pos_emb,) * 2 + q_pos_emb, k_pos_emb = rotary_pos_emb + # Slice the pos emb for current inference + query = _qwen_apply_rotary_pos_emb(query, q_pos_emb) + key = _qwen_apply_rotary_pos_emb(key, k_pos_emb) + else: + query_list = [] + key_list = [] + for i, rotary_pos_emb in enumerate(rotary_pos_emb_list): + rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb] + rotary_pos_emb = (rotary_pos_emb,) * 2 + q_pos_emb, k_pos_emb = rotary_pos_emb + # Slice the pos emb for current inference + query_list += [_qwen_apply_rotary_pos_emb(query[i : i + 1, :, :], q_pos_emb)] + key_list += [_qwen_apply_rotary_pos_emb(key[i : i + 1, :, :], k_pos_emb)] + query = torch.cat(query_list, dim=0) + key = torch.cat(key_list, dim=0) + + if self.use_cache_quantization: + key = _qwen_quantize_cache_v(key.permute(0, 2, 1, 3), bits=8, qmin=self.cache_qmin, qmax=self.cache_qmax) + value = _qwen_quantize_cache_v(value.permute(0, 2, 1, 3), bits=8, qmin=self.cache_qmin, qmax=self.cache_qmax) + + if layer_past is not None: + past_key, past_value = layer_past[0], layer_past[1] + if self.use_cache_quantization: + # use_cache_quantization: + # present=((q_key,key_scale,key_zero_point), + # (q_value,value_scale,value_zero_point)) + key = ( + 
torch.cat((past_key[0], key[0]), dim=2), + torch.cat((past_key[1], key[1]), dim=2), + torch.cat((past_key[2], key[2]), dim=2), + ) + value = ( + torch.cat((past_value[0], value[0]), dim=2), + torch.cat((past_value[1], value[1]), dim=2), + torch.cat((past_value[2], value[2]), dim=2), + ) + else: + # not use_cache_quantization: + # present=(key,value) + key = torch.cat((past_key, key), dim=1) + value = torch.cat((past_value, value), dim=1) + + if use_cache: + present = (key, value) + else: + present = None + + if self.use_logn_attn and not self.training: + if self.use_cache_quantization: + seq_start = key[0].size(2) - query.size(1) + seq_end = key[0].size(2) + else: + seq_start = key.size(1) - query.size(1) + seq_end = key.size(1) + logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :].type_as(query) + query = query * logn_tensor.expand_as(query) + + if self.use_flash_attn and not self.is_fp32 and query.is_cuda: + q, k, v = query, key, value + attn_output = self.core_attention_flash(q, k, v, attention_mask=attention_mask) + else: + registered_causal_mask = torch.tril( + torch.ones((key.size(1), key.size(1)), dtype=torch.bool, device=key.device) + ).view(1, 1, key.size(1), key.size(1)) + query = query.permute(0, 2, 1, 3) + if not self.use_cache_quantization: + key = key.permute(0, 2, 1, 3) + value = value.permute(0, 2, 1, 3) + + if not self.use_cache_quantization and SUPPORT_SDPA: + causal_mask = registered_causal_mask[:, :, key.size(-2) - query.size(-2) : key.size(-2), : key.size(-2)] + if attention_mask is not None: + attention_mask = attention_mask.expand(-1, -1, causal_mask.size(2), -1).masked_fill( + ~causal_mask, torch.finfo(query.dtype).min + ) + else: + attention_mask = causal_mask + attn_output = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask).transpose(1, 2) + attn_weight = None + else: + attn_output, attn_weight = self._attn(query, key, value, registered_causal_mask, attention_mask, head_mask) + context_layer = self._merge_heads(attn_output, self.num_heads, self.head_dim) + + attn_output = self.c_proj(context_layer) + + outputs = (attn_output, present) + if output_attentions: + if self.use_flash_attn and not self.is_fp32: + raise ValueError("Cannot output attentions while using flash-attn") + else: + outputs += (attn_weight,) + + return outputs + + +class QwenModelPatcher(DecoderModelPatcher): + def __init__( + self, + config: "OnnxConfig", + model: Union["PreTrainedModel", "TFPreTrainedModel"], + model_kwargs: Dict[str, Any], + ): + super().__init__(config, model, model_kwargs) + + self.original_fp16 = model.config.fp16 + self.original_bf16 = model.config.bf16 + model.config.bf16 = False + model.config.fp16 = False + if self.original_fp16 or self.original_bf16: + model.to(torch.float32) + model.transformer.rotary_emb(2048) + + def __enter__(self): + super().__enter__() + for block in self._model.transformer.h: + block.attn._orig_forward = block.attn.forward + block.attn.forward = types.MethodType(_qwen_attention_forward, block.attn) + + def __exit__(self, exc_type, exc_value, traceback): + super().__exit__(exc_type, exc_value, traceback) + for block in self._model.transformer.h: + block.attn.forward = block.attn._orig_forward + self._model.config.bf16 = self.original_bf16 + self._model.config.fp16 = self.original_fp16 + + +class BaichuanModelPatcher(DecoderModelPatcher): + def __init__( + self, + config: "OnnxConfig", + model: Union["PreTrainedModel", "TFPreTrainedModel"], + model_kwargs: Dict[str, Any], + ): + super().__init__(config, model, 
model_kwargs) + # model has first inference buffers initialization + if self._model.lm_head.first_flag: + self._model(torch.ones((1, 10), dtype=torch.int64), torch.ones((1, 10), dtype=torch.int64)) diff --git a/optimum/intel/openvino/modeling_decoder.py b/optimum/intel/openvino/modeling_decoder.py index 53aa05bc5a..832c132615 100644 --- a/optimum/intel/openvino/modeling_decoder.py +++ b/optimum/intel/openvino/modeling_decoder.py @@ -316,7 +316,9 @@ def _reshape( shapes[inputs][0] = -1 input_name = inputs.get_any_name() if input_name.startswith("past_key_values"): - if len(inputs.partial_shape) == 3 and input_name.endswith("value"): + if ( + len(inputs.partial_shape) == 3 and input_name.endswith("value") + ) or self.config.model_type == "chatglm": shapes[inputs][1] = -1 else: shapes[inputs][2] = -1 diff --git a/optimum/intel/openvino/quantization.py b/optimum/intel/openvino/quantization.py index c46f29092b..2022a495d8 100644 --- a/optimum/intel/openvino/quantization.py +++ b/optimum/intel/openvino/quantization.py @@ -350,7 +350,7 @@ def _quantize_torchmodel( model_type = self.model.config.model_type.replace("_", "-") onnx_config_class = TasksManager.get_exporter_config_constructor( - exporter="onnx", + exporter="openvino", model=self.model, task=self.task, model_type=model_type, diff --git a/setup.py b/setup.py index 3a1e1123d0..5c6cf76404 100644 --- a/setup.py +++ b/setup.py @@ -28,8 +28,8 @@ INSTALL_REQUIRE = [ "torch>=1.11", - "optimum~=1.17", "transformers>=4.36.0,<4.39.0", + "optimum @ git+https://github.com/huggingface/optimum.git#egg=optimum", "datasets>=1.4.0", "sentencepiece", "scipy", @@ -50,6 +50,8 @@ "timm", "invisible-watermark>=0.2.0", "auto-gptq", + "transformers_stream_generator", + "einops", ] QUALITY_REQUIRE = ["black~=23.1", "ruff>=0.0.241"] diff --git a/tests/openvino/test_modeling.py b/tests/openvino/test_modeling.py index 2188b7061f..9df6c73214 100644 --- a/tests/openvino/test_modeling.py +++ b/tests/openvino/test_modeling.py @@ -28,6 +28,7 @@ from parameterized import parameterized from PIL import Image from transformers import ( + AutoConfig, AutoFeatureExtractor, AutoModel, AutoModelForAudioClassification, @@ -52,7 +53,6 @@ from transformers.onnx.utils import get_preprocessor from utils_tests import MODEL_NAMES -from optimum.exporters.onnx import MODEL_TYPES_REQUIRING_POSITION_IDS from optimum.intel import ( OVModelForAudioClassification, OVModelForAudioFrameClassification, @@ -473,73 +473,101 @@ def test_pipeline(self, model_arch): class OVModelForCausalLMIntegrationTest(unittest.TestCase): SUPPORTED_ARCHITECTURES = ( "bart", + "baichuan2", "gpt_bigcode", "blenderbot", "blenderbot-small", "bloom", + "chatglm", "codegen", # "data2vec-text", # TODO : enable when enabled in exporters + "gemma", "gpt2", "gpt_neo", "gpt_neox", "llama", # "llama_gptq", "marian", + "minicpm", "mistral", + "mixtral", "mpt", "opt", "pegasus", + "qwen", + "qwen2", + "stablelm", ) GENERATION_LENGTH = 100 IS_SUPPORT_STATEFUL = is_openvino_version(">=", "2023.3") + REMOTE_CODE_MODELS = ("chatglm", "minicpm", "baichuan2", "jais", "qwen") @parameterized.expand(SUPPORTED_ARCHITECTURES) def test_compare_to_transformers(self, model_arch): model_id = MODEL_NAMES[model_arch] + not_stateful = ["gpt_bigcode"] + if is_openvino_version("<", "2024.0"): + not_stateful.append("mixtral") + + if is_openvino_version("<", "2024.1"): + not_stateful.extend(["llama", "gemma"]) if "gptq" in model_arch: self.skipTest("GPTQ model loading unsupported with AutoModelForCausalLM") set_seed(SEED) - ov_model = 
OVModelForCausalLM.from_pretrained(model_id, export=True, ov_config=F32_CONFIG) + + model_kwargs = {} + if model_arch in self.REMOTE_CODE_MODELS: + model_kwargs = { + "config": AutoConfig.from_pretrained(model_id, trust_remote_code=True), + "trust_remote_code": True, + } + ov_model = OVModelForCausalLM.from_pretrained(model_id, export=True, ov_config=F32_CONFIG, **model_kwargs) self.assertIsInstance(ov_model.config, PretrainedConfig) self.assertTrue(ov_model.use_cache) - - transformers_model = AutoModelForCausalLM.from_pretrained(model_id) - tokenizer = AutoTokenizer.from_pretrained(model_id) + self.assertEqual( + ov_model.stateful, self.IS_SUPPORT_STATEFUL and ov_model.config.model_type not in not_stateful + ) + transformers_model = AutoModelForCausalLM.from_pretrained(model_id, **model_kwargs) + tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=model_arch in self.REMOTE_CODE_MODELS) + if model_arch == "qwen": + transformers_model.to(torch.float32) tokens = tokenizer( "This is a sample", return_tensors="pt", return_token_type_ids=False if model_arch == "llama" else None ) - position_ids = None - if model_arch.replace("_", "-") in MODEL_TYPES_REQUIRING_POSITION_IDS: - input_shape = tokens["input_ids"].shape - position_ids = torch.arange(0, input_shape[-1], dtype=torch.long).unsqueeze(0).view(-1, input_shape[-1]) - ov_outputs = ov_model(**tokens, position_ids=position_ids) + ov_outputs = ov_model(**tokens) self.assertTrue("logits" in ov_outputs) self.assertIsInstance(ov_outputs.logits, torch.Tensor) self.assertTrue("past_key_values" in ov_outputs) self.assertIsInstance(ov_outputs.past_key_values, tuple) - - is_stateful = ov_model.config.model_type not in {"gpt_bigcode", "llama"} and self.IS_SUPPORT_STATEFUL + is_stateful = ov_model.config.model_type not in not_stateful and self.IS_SUPPORT_STATEFUL self.assertEqual(ov_model.stateful, is_stateful) if is_stateful: self.assertTrue(len(ov_outputs.past_key_values) == 1 and len(ov_outputs.past_key_values[0]) == 0) - with torch.no_grad(): transformers_outputs = transformers_model(**tokens) # Compare tensor outputs - self.assertTrue(torch.allclose(ov_outputs.logits, transformers_outputs.logits, atol=1e-4)) + self.assertTrue(torch.allclose(ov_outputs.logits, transformers_outputs.logits, equal_nan=True, atol=1e-4)) del transformers_model del ov_model gc.collect() @parameterized.expand(SUPPORTED_ARCHITECTURES) def test_pipeline(self, model_arch): + model_kwargs = {} model_id = MODEL_NAMES[model_arch] - tokenizer = AutoTokenizer.from_pretrained(model_id) - model = OVModelForCausalLM.from_pretrained(model_id, export=True, use_cache=False, compile=False) + if model_arch in self.REMOTE_CODE_MODELS: + model_kwargs = { + "config": AutoConfig.from_pretrained(model_id, trust_remote_code=True), + "trust_remote_code": True, + } + tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=model_arch in self.REMOTE_CODE_MODELS) + model = OVModelForCausalLM.from_pretrained( + model_id, export=True, use_cache=False, compile=False, **model_kwargs + ) model.config.encoder_no_repeat_ngram_size = 0 model.to("cpu") model.half() @@ -556,8 +584,16 @@ def test_pipeline(self, model_arch): def test_multiple_inputs(self, model_arch): model_id = MODEL_NAMES[model_arch] set_seed(SEED) - model = OVModelForCausalLM.from_pretrained(model_id, export=True, compile=False) - tokenizer = AutoTokenizer.from_pretrained(model_id) + if model_arch == "qwen": + self.skipTest("Qwen tokenizer does not support padding") + model_kwargs = {} + if model_arch in 
self.REMOTE_CODE_MODELS: + model_kwargs = { + "config": AutoConfig.from_pretrained(model_id, trust_remote_code=True), + "trust_remote_code": True, + } + model = OVModelForCausalLM.from_pretrained(model_id, export=True, compile=False, **model_kwargs) + tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=model_arch in self.REMOTE_CODE_MODELS) tokenizer.pad_token = tokenizer.eos_token texts = ["this is a simple input", "this is a second simple input", "this is a third simple input"] tokens = tokenizer(texts, padding=True, return_tensors="pt") diff --git a/tests/openvino/utils_tests.py b/tests/openvino/utils_tests.py index 97c8a92836..ad3cd03d3d 100644 --- a/tests/openvino/utils_tests.py +++ b/tests/openvino/utils_tests.py @@ -22,12 +22,14 @@ "beit": "hf-internal-testing/tiny-random-BeitForImageClassification", "bert": "hf-internal-testing/tiny-random-bert", "bart": "hf-internal-testing/tiny-random-bart", + "baichuan2": "katuni4ka/tiny-random-baichuan2", "bigbird_pegasus": "hf-internal-testing/tiny-random-bigbird_pegasus", "blenderbot-small": "hf-internal-testing/tiny-random-BlenderbotModel", "blenderbot": "hf-internal-testing/tiny-random-BlenderbotModel", "bloom": "hf-internal-testing/tiny-random-BloomModel", "camembert": "hf-internal-testing/tiny-random-camembert", "convbert": "hf-internal-testing/tiny-random-ConvBertForSequenceClassification", + "chatglm": "katuni4ka/tiny-random-chatglm2", "codegen": "hf-internal-testing/tiny-random-CodeGenForCausalLM", "data2vec_text": "hf-internal-testing/tiny-random-Data2VecTextModel", "data2vec_vision": "hf-internal-testing/tiny-random-Data2VecVisionModel", @@ -38,6 +40,7 @@ "convnext": "hf-internal-testing/tiny-random-convnext", "distilbert": "hf-internal-testing/tiny-random-distilbert", "electra": "hf-internal-testing/tiny-random-electra", + "gemma": "fxmarty/tiny-random-GemmaForCausalLM", "flaubert": "hf-internal-testing/tiny-random-flaubert", "gpt_bigcode": "hf-internal-testing/tiny-random-GPTBigCodeModel", "gpt2": "hf-internal-testing/tiny-random-gpt2", @@ -55,7 +58,9 @@ "opt125m": "facebook/opt-125m", "marian": "sshleifer/tiny-marian-en-de", "mbart": "hf-internal-testing/tiny-random-mbart", + "minicpm": "katuni4ka/tiny-random-minicpm", "mistral": "echarlaix/tiny-random-mistral", + "mixtral": "TitanML/tiny-mixtral", "mobilebert": "hf-internal-testing/tiny-random-MobileBertModel", "mobilenet_v1": "google/mobilenet_v1_0.75_192", "mobilenet_v2": "hf-internal-testing/tiny-random-MobileNetV2Model", @@ -66,6 +71,8 @@ "pegasus": "hf-internal-testing/tiny-random-pegasus", "pix2struct": "fxmarty/pix2struct-tiny-random", "poolformer": "hf-internal-testing/tiny-random-PoolFormerModel", + "qwen": "katuni4ka/tiny-random-qwen", + "qwen2": "Qwen/Qwen1.5-0.5B", "resnet": "hf-internal-testing/tiny-random-resnet", "roberta": "hf-internal-testing/tiny-random-roberta", "roformer": "hf-internal-testing/tiny-random-roformer", @@ -76,6 +83,7 @@ "stable-diffusion": "hf-internal-testing/tiny-stable-diffusion-torch", "stable-diffusion-xl": "echarlaix/tiny-random-stable-diffusion-xl", "stable-diffusion-xl-refiner": "echarlaix/tiny-random-stable-diffusion-xl-refiner", + "stablelm": "hf-internal-testing/tiny-random-StableLmForCausalLM", "latent-consistency": "echarlaix/tiny-random-latent-consistency", "sew": "hf-internal-testing/tiny-random-SEWModel", "sew_d": "asapp/sew-d-tiny-100k-ft-ls100h", From f03b3569a523bc6760a2b34bc3e0dc62dbb2c9b1 Mon Sep 17 00:00:00 2001 From: Helena Kloosterman Date: Fri, 15 Mar 2024 15:37:20 +0100 Subject: [PATCH 4/8] Update 
transformers version warning for gemma (#609) --- optimum/exporters/openvino/model_patcher.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/optimum/exporters/openvino/model_patcher.py b/optimum/exporters/openvino/model_patcher.py index 371fee732a..a31287d84f 100644 --- a/optimum/exporters/openvino/model_patcher.py +++ b/optimum/exporters/openvino/model_patcher.py @@ -60,7 +60,7 @@ def patch_model_with_bettertransformer(model): ) if ( - getattr(model.config, "model_type") in {"gpt_bigcode", "llama"} + getattr(model.config, "model_type") in {"gpt_bigcode", "llama", "gemma"} and is_transformers_version(">=", "4.38") and is_openvino_version("<", "2024.1.0-14612") ): @@ -69,10 +69,11 @@ def patch_model_with_bettertransformer(model): _openvino_version.split("-")[0] if is_openvino_version("<=", "2024.0.0-14509") else _openvino_version ) log.warn( - COLOR_RED + f"[WARNING] Stateful models are not supported for Llama and GPTBigCode with Transformers " + COLOR_RED + + f"[WARNING] Stateful models are not supported for Llama, Gemma and GPTBigCode with Transformers " f"{_transformers_version} and OpenVINO {display_version}. For good performance, consider using a nightly OpenVINO build: " - "https://docs.openvino.ai/2024/get-started/install-openvino.html. For models that do not need transformers " - "4.38+, it is also an option to downgrade transformers: `pip install transformers==4.37.2`" + COLOR_RESET + "https://docs.openvino.ai/2024/get-started/install-openvino.html. For gpt-bigcode and llama models, " + "it is also an option to downgrade transformers: `pip install transformers==4.37.2`" + COLOR_RESET ) # model already has required SDPA implementation From 334fc103be4b86aa8a35781721057c553b8538b5 Mon Sep 17 00:00:00 2001 From: Helena Kloosterman Date: Fri, 15 Mar 2024 15:37:44 +0100 Subject: [PATCH 5/8] Add test for openvino-nightly (#607) --- .github/workflows/test_openvino.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/test_openvino.yml b/.github/workflows/test_openvino.yml index 6d709eecfd..ba5b09ff81 100644 --- a/.github/workflows/test_openvino.yml +++ b/.github/workflows/test_openvino.yml @@ -36,3 +36,9 @@ jobs: - name: Test with Pytest run: | pytest tests/openvino/ --ignore test_modeling_basic + - name: Test openvino-nightly + run: | + pip uninstall -y openvino + pip install openvino-nightly + python -c "from optimum.intel import OVModelForCausalLM; OVModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2', export=True, compile=False)" + optimum-cli export openvino -m hf-internal-testing/tiny-random-gpt2 gpt2-ov From 948f99d20a93a066351f7ee6a63b204fc48fc4b4 Mon Sep 17 00:00:00 2001 From: Ekaterina Aidova Date: Fri, 15 Mar 2024 18:41:56 +0400 Subject: [PATCH 6/8] Apply bettertransformer by default for causallm (#605) --- optimum/exporters/openvino/convert.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/optimum/exporters/openvino/convert.py b/optimum/exporters/openvino/convert.py index dfca80f001..8c49994874 100644 --- a/optimum/exporters/openvino/convert.py +++ b/optimum/exporters/openvino/convert.py @@ -293,7 +293,7 @@ def export_pytorch( logger.info(f"Using framework PyTorch: {torch.__version__}") output = Path(output) - if stateful: + if ensure_export_task_support_stateful(config.task): # Trigger bettertransformer together with stateful model because OpenVINO HW-dependent transformations expect # both of them are applied to demonstrate the best performance. 
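         # Note: the condition above is task-based (ensure_export_task_support_stateful) rather than
         # driven by the user-facing `stateful` flag, so causal LM exports get bettertransformer
         # (the SDPA attention path) applied by default.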
# TODO: Consider applying bettertransformer regardless of stateful flag -- requires additional validation. From cd3bc8a6907e7fda0da81fd8d3a6e9f6f2c759a8 Mon Sep 17 00:00:00 2001 From: Helena Kloosterman Date: Fri, 15 Mar 2024 15:48:22 +0100 Subject: [PATCH 7/8] Fix notebooks (#603) - use dataset for audio example to fix permission issue with URL - change pytorch_model.bin to model.safetensors - speed up quantization notebook test by using only 10 samples - update half() messaging (no longer needed to do half() on GPU) --- .github/workflows/test_openvino_notebooks.yml | 2 + .../openvino/optimum_openvino_inference.ipynb | 428 +++++++++--------- .../question_answering_quantization.ipynb | 10 +- 3 files changed, 228 insertions(+), 212 deletions(-) diff --git a/.github/workflows/test_openvino_notebooks.yml b/.github/workflows/test_openvino_notebooks.yml index abc2a65440..7b037d0565 100644 --- a/.github/workflows/test_openvino_notebooks.yml +++ b/.github/workflows/test_openvino_notebooks.yml @@ -49,5 +49,7 @@ jobs: - name: Test with Pytest run: | + sed -i 's/NUM_TRAIN_ITEMS = 600/NUM_TRAIN_ITEMS = 10/' notebooks/openvino/question_answering_quantization.ipynb + sed -i 's/# %pip install/%pip install/' notebooks/openvino/optimum_openvino_inference.ipynb python -m pytest --nbval-lax notebooks/openvino/optimum_openvino_inference.ipynb notebooks/openvino/question_answering_quantization.ipynb diff --git a/notebooks/openvino/optimum_openvino_inference.ipynb b/notebooks/openvino/optimum_openvino_inference.ipynb index 446e668911..b94238d358 100644 --- a/notebooks/openvino/optimum_openvino_inference.ipynb +++ b/notebooks/openvino/optimum_openvino_inference.ipynb @@ -9,7 +9,7 @@ "\n", "This notebook is a playground for running inference with OpenVINO on Transformers models with Optimum. The first part of this notebook explains the different ways to load a model, and some options specific to OpenVINO, like doing inference on an Intel GPU. The second part of this notebook consists of small examples for different supported tasks. \n", "\n", - "Do not forget to install the required dependencies before running this notebook with `pip install optimum[openvino] ipywidgets pillow torchaudio` or uncomment the cell below to install these requirements in your current Python environment. The audio classification example requires [ffmpeg](https://ffmpeg.org/download.html)." + "Do not forget to install the required dependencies before running this notebook by uncommenting the cell below to install these requirements in your current Python environment. The audio classification example requires [ffmpeg](https://ffmpeg.org/download.html)." 
] }, { @@ -17,18 +17,11 @@ "execution_count": 1, "id": "6a6774ad-912b-4053-b7f6-14dc020807ef", "metadata": { - "execution": { - "iopub.execute_input": "2023-03-12T19:47:17.121564Z", - "iopub.status.busy": "2023-03-12T19:47:17.121264Z", - "iopub.status.idle": "2023-03-12T19:47:17.125098Z", - "shell.execute_reply": "2023-03-12T19:47:17.124669Z", - "shell.execute_reply.started": "2023-03-12T19:47:17.121531Z" - }, "tags": [] }, "outputs": [], "source": [ - "# %pip install optimum[openvino] ipywidgets pillow torchaudio" + "# %pip install optimum[openvino] ipywidgets pillow torchaudio soundfile librosa" ] }, { @@ -52,13 +45,6 @@ "execution_count": 2, "id": "0c89b2a2-ce31-4773-9454-3e0e57d1a231", "metadata": { - "execution": { - "iopub.execute_input": "2023-03-12T19:47:17.127386Z", - "iopub.status.busy": "2023-03-12T19:47:17.127169Z", - "iopub.status.idle": "2023-03-12T19:47:24.576408Z", - "shell.execute_reply": "2023-03-12T19:47:24.575840Z", - "shell.execute_reply.started": "2023-03-12T19:47:17.127372Z" - }, "tags": [] }, "outputs": [ @@ -66,15 +52,26 @@ "name": "stderr", "output_type": "stream", "text": [ - "/home/helena/venvs/openvino_env/lib/python3.10/site-packages/openvino/offline_transformations/__init__.py:10: FutureWarning: The module is private and following namespace `offline_transformations` will be removed in the future.\n", - " warnings.warn(\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "INFO:nncf:NNCF initialized successfully. Supported frameworks detected: torch, onnx, openvino\n" + "/home/helena/venvs/openvino_env/lib/python3.10/site-packages/transformers/utils/generic.py:441: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n", + " _torch_pytree._register_pytree_node(\n", + "/home/helena/venvs/openvino_env/lib/python3.10/site-packages/transformers/utils/generic.py:309: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n", + " _torch_pytree._register_pytree_node(\n", + "/home/helena/venvs/openvino_env/lib/python3.10/site-packages/transformers/utils/generic.py:309: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n", + " _torch_pytree._register_pytree_node(\n", + "/home/helena/venvs/openvino_env/lib/python3.10/site-packages/diffusers/utils/outputs.py:63: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n", + " torch.utils._pytree._register_pytree_node(\n", + "/home/helena/venvs/openvino_env/lib/python3.10/site-packages/diffusers/utils/outputs.py:63: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n", + " torch.utils._pytree._register_pytree_node(\n", + "/home/helena/venvs/openvino_env/lib/python3.10/site-packages/transformers/utils/generic.py:309: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n", + " _torch_pytree._register_pytree_node(\n", + "Framework not specified. Using pt to export the model.\n", + "Using the export variant default. 
Available variants are:\n", + " - default: The default ONNX variant.\n", + "Using framework PyTorch: 2.2.0+cpu\n", + "/home/helena/venvs/openvino_env/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py:246: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n", + " mask, torch.tensor(torch.finfo(scores.dtype).min)\n", + "Compiling the model to CPU ...\n", + "Compiling the model to CPU ...\n" ] } ], @@ -104,13 +101,6 @@ "execution_count": 3, "id": "8053abe3-0e1f-445d-8397-630efac28269", "metadata": { - "execution": { - "iopub.execute_input": "2023-03-12T19:47:24.577918Z", - "iopub.status.busy": "2023-03-12T19:47:24.577422Z", - "iopub.status.idle": "2023-03-12T19:47:25.276093Z", - "shell.execute_reply": "2023-03-12T19:47:25.275685Z", - "shell.execute_reply.started": "2023-03-12T19:47:24.577895Z" - }, "tags": [] }, "outputs": [ @@ -154,13 +144,6 @@ "execution_count": 4, "id": "dcf7a5c3-81ba-42cb-a7d9-d22c5bb00325", "metadata": { - "execution": { - "iopub.execute_input": "2023-03-12T19:47:25.276779Z", - "iopub.status.busy": "2023-03-12T19:47:25.276603Z", - "iopub.status.idle": "2023-03-12T19:47:25.278703Z", - "shell.execute_reply": "2023-03-12T19:47:25.278355Z", - "shell.execute_reply.started": "2023-03-12T19:47:25.276764Z" - }, "tags": [] }, "outputs": [], @@ -174,20 +157,20 @@ "execution_count": 5, "id": "648a8eb1-1d50-4503-8094-c9a88098bee9", "metadata": { - "execution": { - "iopub.execute_input": "2023-03-12T19:47:25.279302Z", - "iopub.status.busy": "2023-03-12T19:47:25.279139Z", - "iopub.status.idle": "2023-03-12T19:47:26.802333Z", - "shell.execute_reply": "2023-03-12T19:47:26.801969Z", - "shell.execute_reply.started": "2023-03-12T19:47:25.279288Z" - }, "tags": [] }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Compiling the model to CPU ...\n" + ] + }, { "data": { "text/plain": [ - "{'score': 0.8515876531600952,\n", + "{'score': 0.8515874147415161,\n", " 'start': 12,\n", " 'end': 64,\n", " 'answer': 'a framework for deep learning inference optimization'}" @@ -234,16 +217,16 @@ "execution_count": 6, "id": "c5d8d4be-3449-4a78-aa92-56ef2b355572", "metadata": { - "execution": { - "iopub.execute_input": "2023-03-12T19:47:26.802936Z", - "iopub.status.busy": "2023-03-12T19:47:26.802808Z", - "iopub.status.idle": "2023-03-12T19:47:27.259642Z", - "shell.execute_reply": "2023-03-12T19:47:27.259125Z", - "shell.execute_reply.started": "2023-03-12T19:47:26.802923Z" - }, "tags": [] }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Compiling the model to CPU ...\n" + ] + }, { "data": { "text/plain": [ @@ -285,10 +268,10 @@ "### OpenVINO features\n", "\n", "- For improved performance, it is sometimes useful to reshape the model to use static input shapes\n", - "- Models can be compressed to FP16, which reduces model size by half, and improves performance on GPU (because GPUs contain optimizations for computations with FP16 data).\n", + "- On GPU, inference uses FP16 by default (GPUs contain optimizations for computations with FP16 data). On 4th generation and later Intel® Xeon® Scalable Processors, inference uses BF16 by default. 
\n", "- OpenVINO supports inference on Intel GPU, either an integrated GPU in your laptop or desktop, or an Intel discrete GPU, for example Intel Arc. \n", "\n", - "By default, when loading a model with `model = OVModelForXxx.from_pretrained(model_id)`, it is compiled on CPU. If you know you want to use GPU inference, static shapes, or FP16, you can set `compile=False` to the `.from_pretrained()` method, to skip the compilation step, as the model will have to be compiled again after steps such as reshaping, fp16 conversion or changing device. The model can then be compile with `model.compile()`. In the case the model was not compiled, it will be automatically done before the first inference, resulting in an increase of the first inference latency, since it will include the model compilation time." + "By default, when loading a model with `model = OVModelForXxx.from_pretrained(model_id)`, it is compiled on CPU. If you need to modify the model, for example to use static shapes, you can set `compile=False` to the `.from_pretrained()` method, to skip the compilation step, as the model will have to be compiled again after steps such as reshaping or changing device. The model can then be compile with `model.compile()`. In the case the model was not compiled, it will be automatically done before the first inference, resulting in an increase of the first inference latency, since it will include the model compilation time." ] }, { @@ -316,20 +299,20 @@ "execution_count": 7, "id": "e0754efa-0beb-4060-8633-daecc5ebca31", "metadata": { - "execution": { - "iopub.execute_input": "2023-03-12T19:47:27.261759Z", - "iopub.status.busy": "2023-03-12T19:47:27.261529Z", - "iopub.status.idle": "2023-03-12T19:47:31.027499Z", - "shell.execute_reply": "2023-03-12T19:47:31.027078Z", - "shell.execute_reply.started": "2023-03-12T19:47:27.261738Z" - }, "tags": [] }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Compiling the model to CPU ...\n" + ] + }, { "data": { "text/plain": [ - "{'score': 0.7991431951522827,\n", + "{'score': 0.7991424798965454,\n", " 'start': 12,\n", " 'end': 62,\n", " 'answer': 'a toolkit for deep learning inference optimization'}" @@ -366,12 +349,12 @@ }, { "cell_type": "markdown", - "id": "3a564882-dfd2-4f62-803b-f1e3485736c3", + "id": "8798b8d5-50ad-439f-a1d8-886d579a91fe", "metadata": {}, "source": [ - "#### Compressing model weights to FP16\n", + "#### Saving Model in FP16 format\n", "\n", - "Compressing model weights saves disk space, and speeds up inference on Intel GPU." + "`model.half()` converts the model weights to FP16 precision. This reduces the size of the model by half, with usually a negligible impact on accuracy." ] }, { @@ -379,13 +362,6 @@ "execution_count": 8, "id": "09f443d5-ff58-416c-ab70-bee16e9f8235", "metadata": { - "execution": { - "iopub.execute_input": "2023-03-12T19:47:31.028314Z", - "iopub.status.busy": "2023-03-12T19:47:31.028164Z", - "iopub.status.idle": "2023-03-12T19:47:32.026899Z", - "shell.execute_reply": "2023-03-12T19:47:32.026017Z", - "shell.execute_reply.started": "2023-03-12T19:47:31.028301Z" - }, "tags": [] }, "outputs": [], @@ -401,7 +377,16 @@ "source": [ "#### Loading Model on GPU\n", "\n", - "For GPU inference, we recommend using FP16. 
@@ -401,7 +377,16 @@
    "source": [
     "#### Loading Model on GPU\n",
     "\n",
-    "For GPU inference, we recommend using FP16. OpenVINO support for dynamic shapes on GPU is in preview mode, so for now we recommend using static shapes.\n",
+    "A model can be loaded to GPU by using `model.to(\"GPU\")` or by passing `device` to `from_pretrained()`.\n",
+    "\n",
+    "GPU inference will automatically run with FP16 precision, regardless of the precision of the weights of the model. To override this and force FP32 precision, you can pass an `ov_config` argument to `.from_pretrained()`: \n",
+    "\n",
+    "```\n",
+    "model = OVModelForQuestionAnswering.from_pretrained(model_id,\n",
+    "                                                    device=\"GPU\",\n",
+    "                                                    ov_config={\"INFERENCE_PRECISION_HINT\": \"f32\"}\n",
+    ")\n",
+    "```\n",
+    "\n",
     "OpenVINO's `Core().available_devices` property shows the supported devices on the system. "
    ]
   },
@@ -411,13 +396,6 @@
    "execution_count": 9,
    "id": "09f13ef7-2321-47c6-a08f-5fbe61b32b43",
    "metadata": {
-    "execution": {
-     "iopub.execute_input": "2023-03-12T19:47:32.029229Z",
-     "iopub.status.busy": "2023-03-12T19:47:32.028882Z",
-     "iopub.status.idle": "2023-03-12T19:47:32.061033Z",
-     "shell.execute_reply": "2023-03-12T19:47:32.060524Z",
-     "shell.execute_reply.started": "2023-03-12T19:47:32.029205Z"
-    },
     "tags": []
    },
    "outputs": [
@@ -426,7 +404,7 @@
     "output_type": "stream",
     "text": [
      "CPU 11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz\n",
-     "GPU Intel(R) Iris(R) Xe Graphics [0x9a49] (iGPU)\n"
+     "GPU Intel(R) Iris(R) Xe Graphics (iGPU)\n"
     ]
    }
   ],
@@ -442,16 +420,17 @@
    "execution_count": 10,
    "id": "bf5194b5-5e80-4597-85d4-fab72fbed2fa",
    "metadata": {
-    "execution": {
-     "iopub.execute_input": "2023-03-12T19:47:32.061881Z",
-     "iopub.status.busy": "2023-03-12T19:47:32.061689Z",
-     "iopub.status.idle": "2023-03-12T19:47:33.067283Z",
-     "shell.execute_reply": "2023-03-12T19:47:33.066928Z",
-     "shell.execute_reply.started": "2023-03-12T19:47:32.061868Z"
-    },
     "tags": []
    },
    "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Compiling the model to GPU ...\n",
+      "Setting OpenVINO CACHE_DIR to /home/helena/.cache/huggingface/hub/models--helenai--distilbert-base-uncased-distilled-squad-ov-fp32/snapshots/a9da64102a84c4b3f110c4d627937a110e56257f/model_cache\n"
+     ]
+    },
     {
      "name": "stdout",
      "output_type": "stream",
@@ -461,15 +440,35 @@
     }
    ],
    "source": [
-    "# Compile the model on GPU if a GPU is found\n",
+    "# Use `model.to()` to compile the model on GPU if a GPU is found\n",
     "if \"GPU\" in Core().available_devices:\n",
-    "    model.half()\n",
     "    model.reshape(1, 28)\n",
     "    model.to(\"gpu\")\n",
     "    model.compile()\n",
     "    print(ov_pipe.model._device)"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "id": "2538c02e-fa35-459f-90fb-e6667ef8747b",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Compiling the model to GPU ...\n",
+      "Setting OpenVINO CACHE_DIR to distilbert-base-uncased-distilled-squad-ov-fp16/model_cache\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Set the device directly with `.from_pretrained()`\n",
+    "if \"GPU\" in Core().available_devices:\n",
+    "    model = OVModelForQuestionAnswering.from_pretrained(\"distilbert-base-uncased-distilled-squad-ov-fp16\", device=\"GPU\")"
+   ]
+  },
   {
    "cell_type": "markdown",
    "id": "8e3a06fd-e51b-404e-b82a-2557e3db1375",
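For reference, the two loading options shown in the hunk above can be combined; a minimal sketch, assuming a GPU device is available and reusing the FP16 directory saved earlier in the notebook:

```python
from optimum.intel.openvino import OVModelForQuestionAnswering

# Compile directly on GPU and force FP32 execution precision (GPU otherwise defaults to FP16)
model = OVModelForQuestionAnswering.from_pretrained(
    "distilbert-base-uncased-distilled-squad-ov-fp16",
    device="GPU",
    ov_config={"INFERENCE_PRECISION_HINT": "f32"},
)
```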
@@ -507,23 +506,48 @@
    "source": [
     "Audio classification is the task of automatically categorizing audio data into classes or categories. See Hugging Face's [audio-classification](https://huggingface.co/tasks/audio-classification) documentation for more information.\n",
     "\n",
-    "In this example, we use the [MIT/ast-finetuned-speech-commands-v2](https://huggingface.co/MIT/ast-finetuned-speech-commands-v2) model to do inference on an audio file from the [speech commands](https://huggingface.co/datasets/speech_commands/viewer/v0.01/test) dataset. You can try your own audio file too. To see the classes that this model was trained on, run `model.config.id2label`\n",
+    "In this example, we use the [MIT/ast-finetuned-speech-commands-v2](https://huggingface.co/MIT/ast-finetuned-speech-commands-v2) model to do inference on an audio file from the [speech commands](https://huggingface.co/datasets/speech_commands/viewer/v0.01/test) dataset. You can try your own audio file too. To do that, set `audio_sample = /path/to/audio_file`. To see the classes that this model was trained on, run `model.config.id2label`\n",
     "\n",
     "The model pipeline needs ffmpeg. On Ubuntu Linux: `sudo apt install ffmpeg`; see https://ffmpeg.org/download.html for other OSs"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
-   "id": "570fd296-15a1-437d-97db-76b91407dc3c",
+   "execution_count": 12,
+   "id": "6b725d0a-e230-4b0e-b6b6-3d6d81eb984d",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Compiling the model to CPU ...\n"
+     ]
+    }
+   ],
+   "source": [
+    "from IPython.display import Audio\n",
+    "from optimum.intel.openvino import OVModelForAudioClassification\n",
+    "from transformers import AutoFeatureExtractor, pipeline\n",
+    "from datasets import load_dataset\n",
+    "\n",
+    "model_id = \"helenai/MIT-ast-finetuned-speech-commands-v2-ov\"\n",
+    "model = OVModelForAudioClassification.from_pretrained(model_id)\n",
+    "feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)\n",
+    "ov_pipe = pipeline(\"audio-classification\", model=model, feature_extractor=feature_extractor)\n",
+    "\n",
+    "# streaming=true enables loading one item from the dataset without downloading the full dataset\n",
+    "dataset = load_dataset(\"speech_commands\", \"v0.02\", streaming=True)\n",
+    "audio_sample = next(iter(dataset[\"test\"]))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "id": "d3012174-b58f-4efb-bb73-a88dbfb20392",
+   "metadata": {
     "tags": []
    },
    "outputs": [
     {
      "data": {
       "text/html": [
        "\n",
        " \n",
        " "
       ],
       "text/plain": [
        ""
       ]
      },
      "metadata": {},
      "output_type": "display_data"
     },
     {
      "data": {
       "text/plain": [
-       "[{'score': 0.9999880790710449, 'label': 'down'},\n",
-       " {'score': 7.452485419889854e-07, 'label': 'five'},\n",
-       " {'score': 7.436851205966377e-07, 'label': 'go'}]"
+       "[{'score': 0.9999935626983643, 'label': 'backward'},\n",
+       " {'score': 3.4823816008611175e-07, 'label': 'forward'},\n",
+       " {'score': 3.3890643180711777e-07, 'label': 'wow'}]"
       ]
      },
-     "execution_count": 11,
+     "execution_count": 13,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "from IPython.display import Audio\n",
-    "from optimum.intel.openvino import OVModelForAudioClassification\n",
-    "from transformers import AutoFeatureExtractor, pipeline\n",
+    "if isinstance(audio_sample, dict):\n",
+    "    audio_data = 
audio_sample[\"audio\"][\"array\"]\n", + " sampling_rate = audio_sample[\"audio\"][\"sampling_rate\"]\n", + "else:\n", + " # if audio_sample is not a dataset item, it should be the path to an audio file\n", + " audio_data = audio_sample\n", + " sampling_rate = None\n", "\n", - "model_id = \"helenai/MIT-ast-finetuned-speech-commands-v2-ov\"\n", - "model = OVModelForAudioClassification.from_pretrained(model_id)\n", - "feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)\n", - "ov_pipe = pipeline(\"audio-classification\", model=model, feature_extractor=feature_extractor)\n", + "display(Audio(audio_data, rate=sampling_rate))\n", "\n", - "audio_url_or_file = \"https://datasets-server.huggingface.co/assets/speech_commands/--/v0.01/test/38/audio/audio.mp3\"\n", - "display(Audio(audio_url_or_file))\n", - "ov_pipe(audio_url_or_file, top_k=3)" + "ov_pipe(audio_data, top_k=3)" ] }, { @@ -586,16 +609,9 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 14, "id": "5207a5af-3b53-43b3-ae5e-5b352c0f08d4", "metadata": { - "execution": { - "iopub.execute_input": "2023-03-12T19:47:38.121136Z", - "iopub.status.busy": "2023-03-12T19:47:38.120930Z", - "iopub.status.idle": "2023-03-12T19:47:42.382362Z", - "shell.execute_reply": "2023-03-12T19:47:42.381733Z", - "shell.execute_reply.started": "2023-03-12T19:47:38.121116Z" - }, "tags": [] }, "outputs": [ @@ -603,18 +619,20 @@ "name": "stderr", "output_type": "stream", "text": [ + "Provided model does not contain state. It may lead to sub-optimal performance.Please reexport model with updated OpenVINO version >= 2023.3.0 calling the `from_pretrained` method with original model and `export=True` parameter\n", + "Compiling the model to CPU ...\n", "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "data": { "text/plain": [ - "[{'generated_text': \"Hello, I'm a language model, so you might consider changing your code to a similar type for Python 2 or 3.\\n\\nWhy Python 2\"},\n", - " {'generated_text': \"Hello, I'm a language model, so the second statement is true or true. You can also have both. It's not strictly necessary, but\"},\n", - " {'generated_text': 'Hello, I\\'m a language model, and we\\'re seeing it all in the Java world,\" he added.\\n\\nSo what might happen next?'}]" + "[{'generated_text': \"Hello, I'm a language model, so I'm really interested in programming. I've always been a programming student. But for a long time,\"},\n", + " {'generated_text': \"Hello, I'm a language model, because I don't think I ever spoke on paper to an editor. I'm simply someone reading a paper.\"},\n", + " {'generated_text': \"Hello, I'm a language model, I understand. Your mother was talking to you. 
No, well, my grandmother had said that she heard '\"}]"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
@@ -648,19 +666,19 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": 15,
    "id": "97569002-0c0a-4208-9b63-b2bab209fb81",
    "metadata": {
-    "execution": {
-     "iopub.execute_input": "2023-03-12T19:47:42.383244Z",
-     "iopub.status.busy": "2023-03-12T19:47:42.383095Z",
-     "iopub.status.idle": "2023-03-12T19:47:48.887705Z",
-     "shell.execute_reply": "2023-03-12T19:47:48.887095Z",
-     "shell.execute_reply.started": "2023-03-12T19:47:42.383228Z"
-    },
     "tags": []
    },
    "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Compiling the model to CPU ...\n"
+     ]
+    },
     {
      "data": {
      "image/jpeg": "[... base64-encoded JPEG image data truncated ...]"
lTRbUiGQib9pI9AvzrAqf6l2ZJ23aPAVa0cG54w6LmTew3HIzggaq5RTcX0Tv5LTQiCCOtVVotwBwScknj+/vqtyTfDskqRVENVcBPHUt4dOMMRwZPYA/OnlGlTXYrdsr3eZXuMEk29ogeFUdzjI/TV2GXGEoosUqdMZV1dxurSvCBSKxIjklOFZwO+ffWzxscGuLds045couN7KnQlHV3W8OtypJxEisd8cZUAj1z6jGrfLksMPaylX8o6MnTdFJeIK5YlnUKWjqJ2BjcKuDlR2x3HzriT8qXpOCf/slsZBd4+pbhMLV4zCGLKtCjInDHILZ4J1n9OWKK9T5Euw1JY5ZIf5nLVPRrsPjLHP5we2Scemro5lGHFbZbqjEXCI2W4VMy1TTQLGcbs7ufgfnrpYpRnDaLEuK2DOlryKq7NDNKphd3UeKdqNxyd3pj2+NW5sSiuUFsmElJ1Z88fVI2K49a3Q2OpD0SeVZfFkZN2SDjflnHyoAHsdek8SOSOFLKqf/AD7HKzuLn7OgLbqX+WszTMktLOuFmU4A577Tz3BxkYODjtrTJVsqX5LqdKwXeCSoWVIlHHihu35j11FjKNgBbrX2VlpPHbw4XLqqN5QfcaKUtkJuPRt7V9VbrURrFVdTXGmpgoH2kWSrY4A3kkgc445x21S8cU+jWvLyrTkTW/6z1nQfWlv6m6Nkms93oJ1kWV5XcTY42yAnDKRkEEchvfTcbjT2MvKcMiyY1X/n9z7/ALL/ABF9F/xR/T2nTqexXmyzNmRzDCWgSqQfjp5jxjuDn04bOudPyl4zfJ7R6SGXB5OPl1+H/wC/kDXCttP086XttJ0tQU9PVV6r93URhSWC/iKjPl9PXjOvMuUs0nLI/wBjLqCSx/J2Ch69tTWG1mNoqQqqtI2xckAc5B5XJ9e+sTxyds0KUV2c0+pf1Ck/mdJU21Wtt0QNFuDhgoI4Kn3b51q8bEn+paKZyT38o5gv1FvHSF2eWKqkrZo5TLNHUSGXc+PXJ7841vyYoZvwY3llCdRIq+8XPryne9rSSUkQkBldFI8NxyFz/wBtWxhHA/cTU8n7mYu9famu6ZMjso2tDI3H559TzpZST6Q6wwcvc9nRYesobd03FELea2aPDRSgEFE74JHzrG2+TcXoqnJRXFbLXTf1KSqoY3SEx1ALsXZgVGeMY7nTqTScp7+wQm2jI3asqqi5z3OtuE7GWQhXdc4/L0GMapc+UeKWyjLJrbYA+/qr3d5oHdpImQsc4CfLEajgoRTXZki2+yvK09FWBZIDJGVIXHK7j7HVrpq0xn3QO6e/py3GSWba7NsaIHG7jPGmzO6SQq0Eal2oIfuo18QcbyoPlHrjVcVy0x1SMvenlimFXBHKFDCRmkH4s/P7a6KjS4NiOr0Ojv8ANfmiidxG5/CoIwVHt/fSrFw2x+V6C9vkqKqeSMVAnjZdzSA5A9hj31RJRjuh3Jx0WZUmm2R09RNHVSHyq5GT6EY9vbUQlF3yQnaDdguJZ8VhaskT+nISApyBx/21ZHIsbaXRZBpaK9fJFVU5mWBYpGcojREO3c4DDUxyz5UnoWWW2U7h1AKGCBYZWgapR0CRKAMBsZPx349jrjY8blNye0jHyctmVvV4qrrVEhlYRqF8MDACqDzxrdDHHHj+17LJNtfkMdPdOw9S2ySrpAaSolkOI438hIXzAj05Ge+qsmV4VHm9L7liqSuJarekqikjnhguVKagKFeBptpkUghwBj29tH8ZGWK3F1Y0otKl2CaX6e19PRTzLHGFhOxPDkC9hjYc4OR8++ml5cMjXaf7DQxTpuRWpXqx9nS4FJVNEPGWVjlSp4HznVqjGuXaKVa0aCKkNrRpY3YmWXl41IUtgnv6jI1jyZPUlxv4G3F2Go0j6ivVTTyPIaeNVlUKSBK+OO3oPb51lgnixpR7v/Q0RipaRq5/GraaCSWOFEimWN0BONoHGDxzn01h5RhJxb7G4v8AUVqhZKyqmLLCJWwFlMeQvHHI11IThGK22v3FcrewFcp46wTU1TsKxoWVh23A4GR31sUpxXdr89lUpdpg1a+GGmNE8SxzSq+EHJYD1z/5207lLu9FayNaZg7pUzR26mqIpGiJZwyOMFcHgAf663RjGT4sqm7inHsIW66xU7LFPUO0mzLFu/vzqh47dpBFpaZNdruIo/DkO+mkOVI52DvpVj3rsmUlHszadYsoxExSHfuHlAHPBwNa3gv9yp5L6Og9O1VPX2ovNVRRoT5XIyyj3xrmZYSg6SNKpxtsWSwWrqS31NBJNN9wG3LUoCQnPDAD002LLkwTUq0TCno6Q9wjoukBYTy0cSxxVBk2u/GDk+mRz8aryTlPJyaNMpa4nM7ss1BU0jitSWk3Y+38RQ6AjkY7jgcH99aY8Wmq2ZXoOTdb0fTPTdHLR+Mr7SzAxAKMnkDHHbsdYVgnmytSBzUUHbTc6fqezxx0PipT1YJaSqJlZWBBIbHoRzrJODwTufaLISvoyv1EaroK+qY0sixTU6xo0Z2DOOWB7MPTBx3Gu/4HDJi5S7TNVvjbKvT9hr7tQQC6wfyxTCTEyYLbiCpZgPU5yPz1qnXPli3YquWmYL6qfTyzfT+y0lEaOnmmjt81YlUW2VLOjqBuIIYqd45GOcjPGuvDPOTtr5SOfkxrHo5BaeqEsU9atytqXWlraf7fG4RyKA6sMNtbgFRxj0HPcHTJPIlTqjP8aBcnU9VH4sVJvp4HO1Vk28JnIBO0D2yfjViWthYHqlmMm+UFiSfN3B/XTaQWW+n7V/O7xRUJqEpFqZkiMzjyxhjjcfgdzz6aSclGNhptI+vPo59B4ujYrj/tCtqv9IzCWlaSkD7cZy2HU8FcHGfT9dcfyMvLcdHQxw9PvZveprzSWujgoKJjDHNF4axwjYDgY2r6DjGuNkxXK0bPU4KomS/nn8olkV1eZI2QKzLhkJxz+Z500cfJaBTo6LPeVulimg8Tem5Zg7qBnJGF+SB6jWJKns0Oa0jHXW41FTJ9zNvnogxBaXgduB+fxrXCFLXZn58n+DIwVyLfTyUc5xvzknGRkds8ftq5wco0wjKpm6k60qbh0bTW/diSOqMkhjwA6YwM+v6axpQhJpmhZHOkY659MVNfUPVJV/ZybgSAAxI+PbVsZr7WV5oNS10a2zwvHAX8VfH27vCc4LD047ayTq3oo5JaoieaOWsko4CjnYZn2KMp78+moScY2zO5bpAeW/ySwNBLLE6f8PZK5Usc53e3bjTemou0itt3TMhUVsFTe1eld4zFINwQYDDJJBPx/lraovhsF3o11bDDAieJJ47yYfaCRsU+49NYVbG7MtUXCkp7vFMgV1EmGiyR4nqefT01tjBuDTF1dmlaRbrRTMoakgcYDQMCVJHGAfTWVXFr5CW0BpbS8drkpGnNdV4KlGbA2/4dq9sj11oWROXJ6Qq74lTpekWjp5LeaNIqzJV55BnG7OF+D+WrMsm5ck9DRaWmiK2W97OHhcAVAm2tJFuDNjt+mNLOXN2gihlLdHmral6afxPtQdsgB3
ZHOB8jT8NU/kW1G6EorvWXaUSo7GZcSbRgDOeSff8AXUShGCEjLkzRWU4qWSaWSmqZO5CrwBySAffOkUnGtaGa5OjAyXOSomniXMpKnwS74wc8nTvEoSVmRz5NJEVLWstFOC7oMDxUK5IIPH751ZJK6ouU62GOm79DDbq+mQiCoDqvjxtkFe+zHznk6x+Tik5R+UaISSi/uaHoKvjknmkV4xUQIULTeaQgjHCnjaPX11n8mHHHtdsswzttv4I791NJa7zMkhaTxImjlhflXZuRj8xznU4PH9THcV89iyk4tmes90gr62KoJIqI/wCmsW0sW7gEn44P6DW6eNr2/BRCabs0N1uxgpKG21c6TsZQ/iKx2sBx5Txg47599YngjyeSI8pf5maKiqaNo6iWkk3u0yhdjfhAXsB/iH+o1z+E7SkjSpJXQanuNxjkaIQkAfibHBGPxH25Gj+HxS3ZcsvtopPeqQRNU7zHURAtMiLhSM/ix6HWqGBr2fHwYZ5ovdAi4XqLxoFFOiLMx3GQ5A478a0KDrvor52VJFakrJKyCuaOEx7N0MYJPOe/pqH7oKLWxk62jI9dVlVWtFLTJFOm78acGNRz+HW/xUoe2RXkTe0YprjJTXQFZDKQDvdhkDPc66LgnGjM7UtGtpKEXS0+NJL4km4LHEo8hznO4/tjWKS4PRpUdbJqDpez00yTSSzERDJU42M3xpZZ8jVUTxilYaukaXGnipKSNaKcglmKkKw9Bge4Gs0ZOPultDP3aRNa6K72CCCtmCrGWMTQA7nVeCHPpjQpYs03CyVCUfcVrh1hVNXwxTUv3LyMShOQgOfXP662RwxptOiHN2aPpOx0VuWa9XBlNf8A1Pto2/FnvuDdsfB1zfIySn/04dfIy1tg7rPq82iyGolRUaSURlpGBcxtwxUNnkc/lxqcHj+pOk//ANK5zpWDrB1bTT3dkilqbXbxHHHEhjYKWByASMDOPnnOrcuBqG0myIS39jsNHfaK/q9FcYGlRY8wvVoC4QsPw/Of31R4mKcJNp/2OpjnLjT6CtPa7fSTlIWkaAOXzIN3OBxrtRio6JbPlH+JHorqpes7r1HM0lTZallgp2SoDGKHgLEUGMDcOBjknuTrv4EpYU/t/uczLjnKTmla1/T7HEAs8c5RFab/AKcZ/wDWrWr0zM0XaKramqKaaWKSAB8o+Ccke36kar410RVbDIt9z69riKRlqpUBaSSd1iWMHjLO2BkkcdzxqqU1j3ISUq2wOlnqY7lNQLtlnQusn27CUKq53kYPOACeO/66aPvpkr7vo+1Ppxb7j0l9O7dQtXxXpKdQ0NXTo22SNxuQDccnhh7cY9tc7L7pOkdWEWorZR+ocMq21KiOo+1faTsC5DcjIB9O2quCfY03SMDD1H9rbJKmfh95U+Y5U+hI1keNuVIVScVs6L9Puqv51akWfZCtOgjGQADz6fPOsGbD6UvvZPqdAL6jWS6ypG8colgnk3kyjb4SjGCABznGMa04M2OHaLI3P9IO6SkvN+qkoKjCHILh49ipgHBLHvps9R2h+eSXtl0vwHKvp2qoIzRy1AmHBcw4IDAZGD/3+dc9yTldF0OHGiW2TSVk8UEhaR9mGKt5hjjWmXCEeRQ27olrqqKigkIhYVH/AA1j3EmTntk/vrKnJvZTJv5A/wDNlpIWnkjAlXJjw2AT6g45OncOTKn9wJcqyouGJZIY08NMyVEQPly2cn582r0opU2JZFb44rRUSPLAJ4UY+Z2yrZ7Ee+pk5SpJjRTQWo7vXXKqdJKmNYeRLlvMiA53f3GqZQUN0Ly+GWrubZTz7AIbiyHOxYyJG3cf37/loi5v8DSkkrKa3RI6qOm8U0qhxseTzKi+oJHoPTTOFK+yt38jq6ZrbI0KysytJ5vLwFI757jSpcuxXaIKh5DXwwfcFoWbc1QPPtPyR6Z4/XViVxbosT+BIJHo7jPFWSxREYVJlbdtOO/H56HH2polNxYyzW/7G21C1cixVKVBkyMlTk9xkeo1M5W010K/kGdTlLT9vV0cclOamLCKMN585bt341djXqJqXwQ/btdhW0XAVksBeJvFaMeLO58oBHH5YOqJLj0TG7uiunT9BSzUb7lhjdDlyCcYxq6TlKQqhEdWWKCWpqplgWN5CBtJI24Uc/t/npnS1ZPBN3RkIk8W61kcWIxJjwmAO0sOMkDV7/7abFaV10FFvcvTVsp5VpqVJp5J4pGVirFcBQPyOSe/fWeXjxzab7othN4Y2vkL00rXqrtsrxI0IUGTxCAxwOc/px+2s7wSxRkoN0NUp1sntNqC1s8NIX2LK0oCHasR3Ht/zZUftqcjrH7/AP8ARuEYNqg3SCy9ROY5amOKaOMp4gGATz6H31zpY8uLfwK+Mnsp9NWtekrjPDX1ZlVo1ZUSPO9Q2Qf0yOfz1pnL1VyiiF7bs2C2+RpapqLzGWMOzucgKxPmBPqNY1OKS5k8HuiGCH7Wvip5oJpRPHsk3RgoOO7EfHOrHNtOURVGqTOc3y3i2zT1D1ElSgQsitkKCfw8jvxnj411cUvUVVRmlFLpi2e8zXnpuGGD/gRbhJInGCOTkfHbVWSCxZW5fI0JNxoo0NXJWU0+aTa8JKsHBwPbOtDil8jRbadoH22wNa7/ACVElUhAG9lhAYsh7jHpjV2TIp46SFjHjK7N7HZQ1XQlLc5oapTI0hYAR4H4uOx5GubLL7HvaNTp1o9dbbSwIlO83gwyFcZXlucEjnnk6iE21dFMkkq+DR9PUs0NFL95FDJJHkLMygswHrg9uONYcsuUlw+Ro6TsGtcGuDR0zInhzkkxM+B3I/TkavhD03yXaBTb7Mr1X03PTyytFMrSKN8UWcDA9BrrYM3NJtFcov4MtL1RcPtFpZ6kwNCCAky4O48/rrR6ML5JdlXJmul6Z+6o6T+bILhZpEWXw5J85kIwGyDkDnO3XMWZqT9LUi6r76Lsf8wtciW2Kinq6OjUALKwVdxyUI45Ix3PIx86pajkfO6bGUZNqKR0Pp6BWt6yiJocgLtkbJGTydaPGxu22zoxXGNAn6m9X1XQvSqta1NRcqmQpCJBkKwGSfyA5Ou5gwqb2Z8s+K0cZqPqo93+nUtov5ravqyXqGlr0ukkSy0sFIgO5ERRlSGCkrg7h+Wu1BxSjj6jZzm58uaew31Y/S166GvPU9M1ulvf27Pup2/qRy8KQ5OCWPttAx+erc6i054zKnKDpsw15+k1RdLLFcsyR0kdIKmILKrswZdxbaOwyP21kXt0zZhcMlqTp/BymppIaekpHjm8Qzxl3TP4CDjB/wA9FurorlVJph6lnj6Fu3i09SKiZqViCFXhyeAeT5Tj3zg+mpwzuXJroqblJUjUfTL653npSqgoq+rnqbGo2x0vDCAZ7LnsvxnSTwqStdmuGVw76PpupSHqKxhwqTQyJ4yl+QwP+Ej5/wBNciT4WjpR3sxNV0UMOZKdKhnk8YrsyWHoB/YaxwzcndUZ5XJ20aCgvNHaaKIfbj+ZSy7XVQF8FfUYHHp31RllPLKn0VzTl
JWaE7Lhb6dp3kMiR+MTGcqwycAk8dtV1Fdo1Y4tNUCK61z2u3S3KMO1MpMZUHuGOQx+B21Xj9zr5Njk1avQ+2zvVZWam8Hcu7cmQG/XUZKjtu2Z7vRdpZYLUz1Cr4oHlIAztBPYH11W1y9o6qKsD3euNz8SopIkeenODTyErxg4YDtnURTi6l8mOeRt0YjqK6xPRws0YWYhnZ0PGB/r341uhB2JklSQNo+oJzHAskciW+ZiuSuQ3GDz2znUyxp9doq5WbLqq15t1LJFAIkmXweZck5GQVPbWXFN8m38Frt9GEslTPZ7o1urUlO9G2h8Ddx7nuO3Gt+RKUecTNu2maulv9HGkM8isrQqNmxfMG7HLfP9tYnFvQW0gZ1FeJK1dyKqRKuckDOc/wCerMcKdljdobTVC190qaGeo8FkUSs7ndvPrqZLilJIXTbVmkaqgjhMZkjEKKP64TYXxggYH5ao3ZaqRneqaWa928XShLTrDKBNGU27FHYk+p/L01qwuMXwkVyTkriQ1Nzp2cyeNklgEi584HfnUqPxRD1sLW6x1XUljppaen2/bTbhI7jLAA5Vc+3r8aplNY5NX2Vyyrj+wcjofsIWeI0807psdSVaNSeCGHGfg+mqlvTMzyzb0Z6avlM0TQpkDI2SrkY7Dga6SahGmdB66CNLUgUda9RGfEIxkcn2JH+WsbyRv2hy+xnZopbR1FBIIGlkeMMYW7+Y8DHrnWjeTF38i/zGQ6qo3NxlLuzIszBoYh+F85Yc47cjW2MdKMfgSSuVGo+nlVNdGmlp6cLUKQjysRgoe2F99UZJeh+p6NEJNbaN5dCyUM9NLRSUsUojWOWFt8kmAck47AHXFeR5JqV3/sLLJKcuL6Alm6WtFguMkEldU19Ydrq0cQ8JCDnaxPPPOrJeRkzx6SRT6fCV8jVVVV1Pe7RUvTxQ0oYbYDKAMRg8ntxnHbWCEcGOVN2aXKTVIw9x6+utpiSzV9vNJNU4jDoQQqZwcEevr+uugvFxzfqwdpFfOS00aSG81dLaGq6muVogzLH597suBz8HGqnCE9RWxpNxV2PsVzp+oYmjqaaOopUPiL4nfI7Y/wDONNOLx9PZEZqRTuNJDNUxloWpYZFZV+0AG0EjHGMenrqI2o/ditplaw9J1lFW1AqbgtTRVIyqSDnwwMDd7f66tyZk4pQjTQqXH50EW6Qp45HqoYlUU8YRWjbJdA3J5+NJ606pvsZr5QYttt+0tDy1Tnw5RuD7wCq84yv9tZpybkuI0Wq2Q0lpp1uME/jxTUkSEhZDuVhjGBnsdWucnBxqmNSWwZcr6sNLUxrSyHxZPCQvyS2DwvwR208MTckUSbrXyZKzU9xjuFbUNAwgUBdhfLREknlfy1tm4OKiUQjK7YJe83JrpJKJAwjB43cY+BraoQUUixJp9mmt1itt4paarulQKvCsWp9pAkB7DIwQB76yznlg3HGq/Jaop7YLrOh5rZJUziaOWhVR4CxTsXG05yN3p/208cykla38icKYMtfUdbFURvc6ispQytLEA5Qbl9ee+Rp3hiv0pMWE3GSbOu9G9aGDpuGeSF54ZJRHHK45YnkZHxrSlFOkdNSc0pAr6hXCovtqQw0TR1K+JArEcIrEFhu7c4Guh43yzNn+D5yutXJR1E0LAo6MVPHrnWxmXZ61dJr1VBUHctPNEhfxiOf/AIj3J9tLdENWjU9K9E9X3Gw1vgdRz0k8aiGioJpiy1GQcoN3C5GAPT8tM5JuqF9P5Zym72e721VWuoqqmSH+kvjRkBfXAP7nRroEkgRqRiSnZRKm8sI8jdt749caZNitH2b0/daastcFsglkaOnhQeOwx4o2+UnHvrzXkOUpWdOHVBeOaipY5IRIizKQfGfPBx298DWSUWnZdo5t1VUGDqWnWlQVDyJzFC4Cn1zye/PbWvDGMoNyZVxc5VFHZo+jami6bWefxKWocbdsyFCAVH+BsHH565zbWTi0bYwr3Jlbp36RUlXaZb1/t8WqaGPbW2Cup5I3qC/KiFs4IUAHPb8taZZpzThHH+zQ78eCgpxnb+UETJT01OqpgQIoCq/c++uJKMm7M2/kz10vcdDVCWIvI0a/1IkHlxnHI9RjV0YOijK66MtcpprjVmoo4XRGQo6sN28HPm/Ma0x4pVIzNKT7M71PaGpbLHLM/wB065QhAcKnuNa8MlLSEnqNspW2aho6CCijjeWWKRZnTxSUmPfgHscaJJttjRarQZpY7lNWpOAy0Tk7DMCI0Pp+XtnVMuKi0+wSd2Dj1N4tWYKoK4EmF8UZZWHG7P76f0/baIbVUOuT0GyNKSednXe8qyABWx7e2oipfKK5fhgyuvz2zxA2JYKobfEEOSpAGBj9dWwx81+xHJptGSt90qq+9yyFHR3j5yOVAHP+WtssSjFUK7bs6dZ0or1Z6eSoq5aaoCiJQnG8j0z+XGuXk5Y5dGhK0hq3qCnvMtuXxTQeH4aiQ4bHrnHf203C4832I5OMqQGgoqWkv5eVvBp6NC9NxkNJwQGU/iGM8a0yk5Y6Xz2RJU6NfabzHKrxNTwUMcj/AHDzIzbdmcN5RnA9PTWRwtX8mBpbTQ+7UsEsaGnpZqKfc1QF3nblceVQR5ht5OTxnUxjJMVS4vRvK202ShpKuaEGOSfj+pwQvvx2HrjXG5Z5yip9I9Bxgrswlxovsllj+9DJLKEIxwB/zfOuxHG8kbqjDJKPyCOp6d6e7W6oiUzViPGtOxPBZW7lT78YGuh46Tg20RKVV9znXXnSV6t/USGCV6mv8TxpURs4Zjxz27+mteDLFxbl0Q007NV9NxfLNff5RWUmypqF8VSRwmB+Fjjg85/XWXy3hlic29GzHLbjI6pb7FVWKcTwCSrqfDIqGlfBIJ8uB6415SeaGROL0vgnio7jsbLerZFL4E0kJBfe6kkvv99x9fjTRhNq0VtoD3Hqn7K8SCCqCP4XCswEePy9SNWRwXGmheVOrMj1pX2S7VS+NK5rWdVdqcZDkduf8Oul4yy44NJaFm0wfbrTXgVMD0RgpzMIaeSZm3ybvX2ORj9taOUNO9/JXKL40zTWKGq6cSRJniJgJhaONgwx689vX01RljGbtFceUAtS1UD2qpqTJKso4VNw2jBzwPYY1TKMnJIflaYNoeoVaF4ZXWUtGMgdgQc4zqxwplKkvlhCi6xmpqQJSRKYwcvsTdkk+uqn46btjLM1qIXi6ghuMvkpSadVDyO6jHPHI9ec6X0uK72SsluugZ1JTS9P20ywwJDSSLlCHDd/f2/LVmDjklTeyySVWzIWjqtrnHT76v7ZabvLIgYEg4B59cZxrXPFxb1dmVTrTYD6Yu5unVRpJqnZBUgASqckZ4P799X5YKGPlXRGOVzph23WSjSpYwSyVscTESCqXGBnHcH4zocpNK9GqlZHd77SW640yqiGmhjZcKff/ER786eMG4uxJSp6IqHr2go2kglaNUyDHHG3lT3LD1z8aZ4XLZWp12U67ofqG/1lRcZU+2tisWhE75Ecffv7ar/isWNKK2yZQk3fwdEs
9MLX0ituel+yeaMmnnbP9Uj/ABjPbOe51mxZJTz8k7+/4N2G4qjL0fVVfJ9Rqfpy5SxLEgiWSPfhSdm/OSe5GP2GvUePFRhr5Kszcp0yve7Rbura2oFRSiCczkLInG1WY8Acdu/6acWgT1NYn6KtFf8Aabnj8RZYp0TzbBghmx+HOcY7jjQlYrLMV1mtvTlJU13+7tURKyR4AYjgZB9AF/XUdB2W7HR0v1GTYwP28IVBEzZwAcck9sjUEvRzP6u/SeXopoq+mYS0UjbJUyN8T+hIH+E+/bII1YmhGmjm8MW86uUfkrcqPq76W26S9dK2QohSojpUj8g80uF4z8gH9tedzyj6kkdXArgrNBdOm6ia5zUFW6rUbgWblc9tpz7HSxcJK0WtW6Z9P/wzfw92O+9F2z/aO02WhuEN7iulwuFzkXxpYIpAKeniY/h5RyRkbt3OeNeh8OGBeM5yVt/6HPyc1n4rS/B9LfxK3PoTqvp4f7UXK20ngSMlMZqjEkiumONmSy55K+m3Oud9SUHiW1a3+f6GzwlKEnKcXx6/H9T8zvqvfaj6OXjpVbX1Jb7/AFVSprZaakVpooUBwoy34gx7cDtrj44x8zFPFJNLq+mdPI34jg4tP5LVV1cKlTUSyJ90y+KIQo2k9yAPz7DWD0FFcF0jFPLzk2ZGm69e4+PLTqKdkbaSRgZOOP7nV0/H49szykpbQ22dRydOQNHKd77yqsreUBu/Pr/pqueP1HZmTikG6OuoqikqEmSN4YozFgsOAw52k/Oq5KUWqZD3utHLf5RWUFwlrWAgo2kZIlLbnXGBgj0P566jlGUVH5Kb4O2aa3dUwR0ngIxqoEJ8SJ3JRsc8/qe2sssTbv5LVNPZl+qrolfFBWJtiMed4jGFP6e+tGGDi3EmbTjYJW8ZEUu+afefDdW4CrnIC++ruF/go5InvNx+zWlZZHZ1CKEdcjA5DH59NJCN2hno9SSzwUFVcpYlDTP4oAIyVyc8dwNTacuEWOnxVstWq+09PRvJJOUx/wAGPGWUnnP9sfrpZwcnVE9LsM1V4rbnCzxo/i0uJAgUYcNgkgfnqlY1DT+RLcmbodGUHU30h61vVVETfrZAKmmIcrtGAAMA4zuJ10PDx45Qk38FGeclNJHJLdQ17UlMJhNv8YeNUcqypgZXA9fz9dZm43oMsWkmzWdL9Q/zG51tojuLQU2VjM0IDHAJ/CzfhJGAdJlx8Ep1bM76M1W/W4VsdOzeN4gwrBR2+R760rxeN0dB5JS7No1ypUo4a2VJXkWCKeRwhUFcsNx9vTOs0VJpwstyR1Fr7C9SXyOl/ktxq0BWaJaiKONuNhJ2YPcZxnWuONxw8EUzrkmWqijp+qHpayp3VU4bfHTLIzhiq5QMBjsfXWWDcG0Xwhzd9mw/h5p7t9TOuX6drzNb5Hp6icVG1f6bRoDsBPocgZ1E/DxeRJLlqhMk3jRhLh1hVWmrqmrfHSWBpKaZ1U8lSRwf07/Oub/CJPiqHcqOXXL6j1b1qPu8i7lwfPwT359ddeHiRUaKed9kFZ1Y9ezDxBu2DGcg9u+mXjqL6ElKujQ9L9X2vpFGkvFNNJW1CBoZY3VlK9s49Of11Rl8fJm1jei1L017vk2dL9TbdW9Mb69PGkxJHA27gHvnj15/TGs/8LOM6j+LKXk+5lKTr/7i0REuoEkrREjuABnkn9861Px6kZXKUXRWl6vkSF1VjLGMAlOwyMn9QP8APTLAm7J5SIabrOKKCFd6HIKhR3GD66JYW70NFNo19kv60duiNPUQtLMQdqt5xgnJ9gORrPKNvo2uCx4lNPYMputJIWo/AURssexsNnxOScn3OrHjTTTOe00wDN9Qai9wvDNK4O52SN144479tXR8aMNol8kqZm4rlNJNPRoWKyAuYl9fXjWrgqUiI45S3QQs2y2XamlnnEShlO8HOARxquVyi0kaPQadksXUNVSdSbmqDCiSbJRH5lKA98n3HGmWNOHRMZOLv5HdT9XxyTSx01MlPHwYHH4s+59wRp44/uI5tGbt9YZ63xpHG4cnCg5P5atktUim/cbin+qtwWNaa6wGoo1JyqeU/GT7ZwcfGsD8KN3DTLVOXTOt/SDqgXtLjJcJHqqFWC/dSruPGCFGedZJYeE1x0/sdDxW3fLo5r9frzFb/q5HWW4LBUU1LCXmiP4mGSrfB27RrteHKTx2xPJpZKR7pv6iSXquqZpIPFkADykDCke7e+TwO3fW4pT0a2+dYKnSN7mFKBUGnwJwAEDsT798Ak4GoYy2ckt9nvXVFtE+8vEQyLJM/dgAcD9/76XsY6R0N4f0v6eequM0SpMyuyDa2444IbPz7Y1PRBnr91RF1eamnhmjnV33gOQCwx6n4/uPy1KA5Z1dZBZK6FYhsjnQOI88o3Zh+/bVtujPJH3J/DZ0Dbqr6cWDrzqy7w9OdH0iwwVVbIR4mVQsfBQ53sQMn2HodeZz405Sl+TtePHkkvwdu/iF6U+n9Dcumafp2/tdLnVUTVq0UkOWemAABDKo2tkjytg4B1Q4xSuBonjpOUtNGw+iVH0pQ9OXut6xr7PbbPDT0y1EnURRot67pVOHOCUBAGOcnXb+kRlLHNS2rMX1BrF6dadf7nFv4jOqfpX15elv3Q3UtuuhtuKaSko6JoWA27vEXyqrqS2MrntjJxqPq0MUcceP6m6MmHNOVqT0fK/TvQ0VyFzvl1kkpzUVLszrNsYICdoAHO3jtrp4fpmXJhjOLSVFH8fig3GSbZl2erNbM8NYVUsW3cjC+muK8aSpov29oErUz2sSqkhnhl7tgjI0zxqW/sLbiS/zueYs8hHhwqpSBh3znkHSeikS6d2Q1F6kNUkpciGJEBbkBvXHyRqfRSQrj+dEty6qqLxUeIr+dFEhA7Agcn541XHCoick2/wVaW+tapQsZPiMVdgDkAd+Pz400sfLsI6dRIrrf1plqKKUGUSDOEx5WzkZ/LOmhibqQ9acWQ2i60tJUI9Xu8FWyuB6amWNtaIUFHsszX1+pb8oipnemQqWC4BwBj/PSLEscKbLYrm+ieguKpX1NO6CIbHiBkY8/B+c+mieO0nEZq9FC41T22eVBiOo8yu2Pw858v6Y1MI8kmZ8kWg90T1FOayaNZA0tVGImEnIAHpqjPCl+xGOR0Kx3O9Wnpvqih8QLR3KCONp5VIVU3Zyo9xj199TimoxlGKu0PLCpSi2+jk9VcnssiPS1ss23/8AzqzR59+NXQhy01RmzSrQLgvE9BULVUphhPib/MMndg8fII/TVzgmuMjKmbWn/h7u1Evj3W92q2rHhyom8V155GB66yS82L1GLZ6VeDL+eSRuOmLMlfWUlKld4EH2ahqh+QQKgKSI/wDFwx1PzJ0VKMbhGTpdf6gn6/rK1xs9ts4DikphAxXCnKsRyPnPb01sVL9RknFzft6Oh/wqfRzqTr+2daV8ldR2agsdtaqapqlMniS8+HAAD5Q2D5vTAwDnXM8zLDGku7Ojgg0qD/086L6gputTWV5joqBYS2YJx5sjke/yfy1j5KL
XDsvj47jJuXRyj6wV0dkSW1JcBdAZHlMkADYU8jnsdW+Nj5y5tUY/Jioz0fPlTUF5Cdm34I13oxOO5bpEbu07E8jI/wAI1KVFivsgk3jacsdvvq2NNENv5LtPdKmOOOEOTEhJVPQE99VuC7EptkqJ9tCV8QshOWX0zqHs1cIrbFkqSJ1MMjOGj2vk9iRg41CVIGoyeiwban2sc33SI+Ttjxls/J0t7qi3gqux4u8tDLAY3Y+FzhzuG/1xj01HBMWck1TLS3B/tXgWjDSSAbJtxypPf9TzpXjV22LGPtca7BzzbTGG8QSKu3LHjHoBp+NkyqkmWrZc1t9TBURgl0P+IZxqHjbTTFXkRhTIKid62p2vMIhuJA28DJ+NMoqCKpZ03otS2KtikjVjuR87WAOHxqIyTDkpK47JLj09PHS09RIr5byYHOMep9tWLXQrVRuSG263PFXKXwQvo3GeNOoatlcI+4u1YaYPE+MsT8afjfRY9nVPohC1zpJLZFcI6SvWZZhE7ZWZQOQF9/nXC8yTwSWTjaNOPOorglswn16tdZZ/qG9PXNC0z08T5hOV2EHb+uBzrd4OSOTFyiVZGpS7Mj1deqmlnjt9HM1PQwIhEMflDOVBLMB3OfftroLbpkg6l6wuIpHoZql5KOT8SMeAfcabjXRNnT+iOt6O2dKOJYFiNNGDtXcfEwpDck/iYgfHm9McrVMLtHMOpeoq/q26SVlbMSGPkhH4Il7BVHsBxouheyrbKfbVRkN5sjHOP8tK5WMkaz6mr4Vbb6UOzClp41wz78Ejc2D+Z090hZn1T9L/AKhWHrD+Cb/Yu6pI13t19C05VfJ4YQlDx2/EAffWBwxptS+d/wBTXi9WUU8fxo5V9CPqD1X1F9TLnc7nUy3Gaqp5WlmqB+N9wGR7Nkay+TGEcainRask5tuRof4p7NW9X2603SDbHRWmFjVKzHLFnHmA5GRyOdU+B5cMcnifbKc8n5EoqXwh/wBMemalbZcKuumg+0KRhXaXMTDbnPbJxkDHoc6by8+PyHGKdNDvC8LqTB1RBS1sN7pZKiO4nZ4yHb4bZIOCqtzjI9M869p9OnHN4bg/i0ee8lennUovs5vLRTzJJJCJo8nzK6kZP5HXl/UidX1H/Ki3Z6etCNNPTLU0yEhssB+mDqqWaMZUL6mRdrQCq4pKiqDRxtGFPDlQvHpq/nH7ic5fYiez1lYJVYPAjN5coWC/PGp5x+BuTbL1P0TX22N/uZJ0zHkskBbKn1xn40jyxfRasXF3YPl6Vulv8OoMbCPBYtUIU3D8tQssHqx4x4bsrGKmqKxXr5JBExy4p1G/9M8ae2l7RucL3/oRXi3iSd5bPT1T0KEKrVCjcOPUjjTR/wD99lc4NyvH1+RlpFVRTipiXbKfLvPp8ge+pkk1THhcXYQuEf8AM6uOXwWp029+QWI7nnkn/vpUuK7LXTdlWGgvFxrAsdBU1rL/AExshZtw/Qf30XCK26KorJJ7Vm4+nX0V6uvV4SoNlq6GlyX3zRFFx8A99YvI8jHGNJ2PDxskpcuNHV7v0bfvDrbfVW+ppKaOASRNPiOF8csS3OMd8aw4ZxgnK9l0sGS6aOC9dfZ2qtEVNI1RMoBlUx+RG5JwfX89djDyatnFzRfJ2jJJdZmoZad5QInYMVXAzjOOe+OdaWtlTVdH1QfoFKoKQXRII3IJDNI+Tjkkka8j/iD7kjZ/FM0vTH0s/wBnKauq6i4tN9nQiKQwgISviiTIYjj0Guh4flyzZaUas0vM5qOvh/7gCy/SiT6jU0t9qK/wZJZXRQI85CnGT+Zzo87zZYM/pxWjCp5b9p0qy2DqDoejo7bZ6p5LPca9DekRQscsaRsY9w7kB/8APWD14ZscnkVSXTv/AEo6eHypw7Wv2Nu8UFXSTU3geA8iFfERBwCMHvrF60k7Na8+L04s571T9IJ+p6JaOW8LTUcYAjhp6FF2j8wc6vh5Xpvlxt/uJPy1NU0/9DEyfwfWWqG6W9VjNnGUhTH6861f4rkXUTG5Y5bcX/oQV/8AB3QVEifb3aaM8ZZqf0/Q99MvquT5iRcPiI63/wAFtubJqrvXOxzt8CnUAfnuPOiX1bI+oIlcfmLET+C6hhn3fzuuJB/C1KnPx31H+K5q/Qv9Rdp2ol0/wVUsinZdplLDA/3MMQP/AOWl/wAVyfMV/cmsj/lRWX+Bmm3q03UlWQe4WnVf276b/F8nxBCPHlZah/gatak7uoK8j0Pgpkaj/Fs3+VErHkRYh/gc6f8AAZWvFyZyR5yiAj8hjR/iuf8AyoFjdCU38D9j8SNDfbky458if9tH+K5/8qF9Kb1YZX+CXpvCeJebpJGowqkphR7dtJ/ifkfCRd6cvuPj/gu6MQsDPc5t3qJQMf8A9dR/iXlP7f2Kni2E6D+EjoaidZYzdFkRg4xMGxj800kvqHkS02v7DLHxejb130n6araEUdVRrNGMAAwqrY/NQNc1TywfKM2h3LJVX/oBZf4f+iJ3c/yeoZnXBCzOqn24Grl5nkpf9xmeUJSZXg/hi6FjlScdPTtIG9ZpT/Y+mnfn+W9OZHoy/wCWG6D6CdHUk++HpWlLk58SeIsc++TqmefyJreR/wByfQmzRUX0ntNDP49JZKGjmHmMsUG1vzyBxrM/Ul7XJv8AqPHx8i6PhT+Lm5W66fVCnqrYUkpRRJAZ4xhZHSSQMw+OQM+uNe98HwsnhYVHL3Lf7WZrtvZxC71H3Fa7bQmceVfy100W3ZQJ05IZluTSdO0tOVLNG7pvzxtODj3Jzn8h+elrYAtXIGooAlZQJbhTo2SDIoOPz1W0OmWerbhLXXyqeRxI4kK71OQccAj9NP2Uv7HbP4PwvVPVNy6OmdClZELlBA5w00lN5pI1P/M0Jcgevh65Pnx4weVdpUbfF7kvwfV/0J+l9u6f6N8aG3O01VUTSHdHuwA5AAONeT8+blmab6ovyYk3o1P1D+mVb1n0VebJb6GVq6vpzBBEI1BeQ4KgZ9zrP4j/AOvDjt2LDClJOXRS6H+mV76XsphulukpaKRgad9q/wBRgo8bIB4IfI59tXeVilGp5F3a/sy7Oo5oQivhf+Tln8QdPH0x1T0NXwxRpU1EtTTHxkGWTarD8wD/AJ69f/8AE5VlyQXVI4HnYFCCZ1Sj+mlgqViqJenleVo1YyPGM5wD2OdePzTaySSerf8AudGHjRpPiFY/p7YoEXZYKbHcqY0/7azuf5Lf4dfESaHoy3ISBZaSJB6+Ggx/bStx+46wNdISXo2jlgkjejplRgM+VMHUqST0S8UmtlCr+nFrrBmSigmDKVIEmBg/lplPj0yPQb+QfJ9FenKhf94sdHIF43SzOw2+2M6ZeTkT0yP4eS+SqfoV0emCem7LHg7gVizp/wCLz/52T6L/AMyJJfo30uAXNjtuGHAEOR+3bQvJzf5mRwl/mCNJ0RaLcN0FroEAICn7ZMD8uNK8033JjrlHqQ+ex0rAFqClkfsH+2TGmUm1tiylOXbHU1vanceBSQjaO6gLn9tHJpdhGWRdMsrUVw
XPhxJjjaSOdSp/lj+rm+5DMtZKjq4ppEIO5CAeNHNivJlfbMlX/TLpy51DTT9P2mZ3OW3RZJPzq6OfKtKT/uUSg32QxfTnp+ibdB0/ZYl3EE/ZqT/lqZZ8j05MTi10aWB4Hj3FVfPYJFwD+usnXyVqn8X/AEAvUVshrqSthkjqVjkeLKU6hXfsAqj1J9vYa6301tZeV9I0LGmlaKfQt06b6YsNupLhcmo46hp5U2Q72z4jf09vfcPLkemddrJ9Ieebz5MiSZhyZEnUEa/p+eg6lpkq6CqYwf8AN5WyfXkca5Hn+Ji8FpRbla7GwReW23VBhbLFuBMryfkAOf21xuSNnoL7lv8AksMqeYVDk8eXjSuSGeCL7ZLD00i8eDUHn/mxpecSV48fyWh0mzEYWdT/APLOkeRFq8b9ywvSyr+NsY//ANj/AP30ry/ZFq8aPyV5enaGFyXqIlbOTl+2hZJNVQr8fGntjBRWunUoauLj/qYk6hSf2I9LEvkqvDbVwDUlgD3QMDprk/gThi+40yW2HA8WVznszH9++mTkHHF92O+8tobczSbiOwLHTe4jjjJF/lxO9kmAJwc540lyJ44/ksr/AC4Bzja2Mjd/70vOXwTxgTJNRpTpKdkauOM4yP76PUn9g9i+SOS7QYAgqofKc447anlkfwLyh8Mie7yzS7VliwefKVx/bTcn8iOS+5DLdFhIaSopgw7Mrc6lbfQnOK7ZAnUiqSi1qbh2UMc51Lv7C+qv8xZgv0koZWnVc+pZQD+udFOhlP8AJ6oqpaiGSKSrVYpQUOKgAlSCCO+pi5J2Ptn5p/XLoi59D32ptN3ZpaulkZ6apQhoammbs6/9WQAw9DnX0vH5cfOwxzR7Wmvscvi8cuLOW1tNI0SVQjIjk43YwN3qBqxFseihjOmGLAhY0fi8bFfYeRnJGRx+h0EfJagoTLR+OuSgO1vg6rloEg30xb6cVnjGYSSRKZREnfj/AM9NKrb2O6SsAV8hkqpXPBLE86cqZtfoN9QofpX9Yejeq6qBqqhtV0gnq6dG2tLTbtsyA+hMbOM/Oqc0Izg4yVoshJwdo/TeTqK32WljpLRBNT0cUspC1MoceGWJTkY82Dyex9NeJ8p4M8+UI8S1eVx7lYW6D+r9F011BHd56SO9vTKTTwGcxpHKOzNgHcB7ajxZY/GyrK1ddCS8uMlSkWOtvrv/ALV2C1W9bZT277KWWZ5YHLGVnyXyCOASeO+ONavL8yHlQjBwqnZXHyVB2myOL6t9BpFJJUfTK0XWplh8FJ7rM1Z4aZywQOvlJPcqQe3trR431GHiQ4YYVf5Enmxzdz2Yu5fUOirKuqnhV6SFpGZYIclYlzwi5GcD51xJuEpuVVZb/HR/JRbr6J5Y1c1EaYJ82V3Y+NLUCP42K7THDrhKqZ2j8d1C93yB/fQ+ERf45PqLBtX9RTTzSKlNPMuPnv7DGm4IR+f9oF6h62W4QpNT0Hhq+ch3cNx/zA6R8eh15t9RIJeqamLc2FZQMFMEn4xzqaj9hH5Uq6RXPVtR9xyqSMOMBgdv6amox3RX/FzvpFqo6nq8Bl8LaeSVbjUaol+XkX2I06juEkasmzaQGIwSR+ek5L7AvLyPZY+6udUQYZ40UnzSP6DHx86X1a+Bn5GV9MQV12hZP94Ta2e6kkDR6qq6F/iMyfZHJNdcOHqPDYHbhUGME8Y/++l/iI/CI9bPL+Yc4rYhg17SHO0sBnH5403r18CyyZouuZQkhrYmM8lTJ9uckAk5GO/HfQ83zRU55b3JjGM0jyFHn7A/iI/PGp9cjlk+7LCXKobHhyMjKO5wB/npuMDSs6XVi2261yXujrKjZUinDOkLElS+MBiR7DPGtuDyV491HsmPmSj2rM90xb36Pqqyrtyq9XUSu7yVGW2yO+5ioP4R2HGrfI82Xk41jmvaiteTKM3NLYc6flrrJFPFSNBEk8zymJIyEQsdzAf/AFEn9dZ8mZZYKEt0PHyZRd0E4erLwxZfERHXniI7T+udZeMCxebPqi0erLzTIWYrIMZ8pIOdHGC+CxebL7CDrO+sVZKhIySeG5Px30v/AE/sT/G5PhEx6j6glAcVSKrHB3HnP551NR+ET/GyGz3O8zIQ9dGzZPLdv89Q4oSXlyZUmuFTGwkeen8QD8ecY/IZOo4lb8l/YuQ3SZthV0JYZPOCCNR6aYv8TIU3AGQFqqmTK85PnJ0KKWiHnm/kgkucTxK8t0UwyHCyFwBn1AIGp40VvNJ9yGNcqORiFuvjkHlElAwP17ajRDyt/wAwktZQK7I08q5G4ATAhhxj8tSlfwJ6nxZEbpRmV0aeokO38MeGwPkjUaWmRzJcW4KZkqRHJns7BmYfA06UQciF6innfbTSKT64BPOlnJR+CLvo8tTSwgQPKIzGoIRBn9/Y6mMo9NE2yg9zpGVmR3cgbmZkO0n2B1Lk10heVjqe60lRIUgVhuTc26MhUPbHOrOS/mZPJvoXx3hjU1CVUwBI2ogxt7Z76VSj0mNykghTeNLTrIkcu3aBgxkkD/X9NS8serJubAnWn03tX1UsctrvVBKJEJ8Go2KkkDkd42JHxkdiONW4fMlglyxv9/yNGMpPs/OTqWzSWi4VFCJPuPs5poC6nKZSRlJB9jjOvoWOfOCl90ao9AQnB1YOHul7J/P4LrEJVjeKnM6A92KnOB+mdU5J+nQjdUS9GTQtPUUlQVEU6bcuAQD3H9/XOrGN8lywWqaHqWnV6Rpomcq0Z43oQQcfp/ppUNRmK6QPUybc7d3+LvqUJ8kSngjvkYxqX9wZ+k1tpaegtFtSogmkqFpYUmD8+cRqGHJ7bge2vms1JybT+TA3Deg39tFNbmWliFIzDfGkCkH8+2pjipW3sS0+kVzDmoRJZC3BEiwsAQe3fH/30ekkTbFolppVkcxKscR8Pe0hBbg+w9QNNDGn8kJsZWxEeG9OoEKDmPBfI/8AP100lFfJDsRbdVVnnCxwxgkhicsy5wAf2zqtuAe5hCKCVoyKianRA54iTIK59Qe/5aq9SCLHbQ3+XS100e2vkiQnd/TTYO/f4Hx+Wk5xIcWyJukKzxJ5HuckqtlVLeYKvf8AXj39tK8tdB6LGUFunLuBGtTxlPEYIXwe5AHGNSssmtKyVDjpkE1GpJl+3TyOSI4XGEJ9z30jnlfSJ4qrCopB4CFI40KgDDe57DvqeOTuxm4paRGqVDhUaDw8csImLE8YGf1P99V8Zp7I5JodS2a4vTykRjcV3KFUdiRgf5/rxqIwfyJYQoOnbqs7PNC8rQ5DM68Aen/nbT+jb2x0mgdeYqyklJqWQRE4fwVztOpeNR7FlJlOVpY5mMKGp25dGTswI7H9v051LS+Ni3L4BQmvks8jJHGYi3EjSbsA+uPnTJQbuRF5RaSS5s4+4jhMfhKBIoYBjk5xg6ZrH3EEpyfuLCRM0X9KaJcDjnnPpp7tEV9itPVVES7GraeGbOH8M5CrnPPHf9tR+5H7sq1F1hRnmNZBJFHwzkkBT7f+9PGDfSIbX3LdNdI1aOAVEjLIgkG2N
gpOe+7t+mn4yi6dDqqErJHpX8SFpC8nDb5VAC59s6inehWqI4pQIZWas3O44R5dpPPGMemocWmSurHNdkKeM0SwtnYSpLBx6aWML0gUr3RZhqJah2lhZPABUMxXGfy9Bj500opdEpuxzyfdtJNG7rTjjeSDj37froqTfQN2RzoJWieiafYU2sjAE5984/z01MilWiWdKRsuKaoaoK8P4hycemOBpHjbdtj1H7FJKa1NVReJhaoDcsE5JZFxzyM59Tp/+olQvtQdSttkfkEFMgHYkhsn3Ax86VYW+2NzivgH1T09VOUaqgjOwk0yosatx3JPJOrIwUVRDab7ESpjr1RftwMjIYqqE498ZwP+2jjF7Fux9PcdqNHKVjyuXipxujZT84/851U4x5XElPVMmtVYk0JSgpjGp8q7AAB+XtpJxr9RKk3+kupVSUtsVKtoPF3BcRhufXGff50vOL0ok+5LbInW5VlNFVQqoypXwxwWbPLMPbGeM6SEoRl7h3zkuQPpum7hFOsq1cNPIMHwY5MHJOSSADppZMfS2VKE+7LkbVscigyirrEBUI5bZx7tj9tTyg9NDVJashlaWqk+5qTNI2CrCNWAUD0HGOPUnVkXD4Fd3spUl8raqriEcKvC3lEpqtxwD2wBge/5alpVYqbui31NLI9sual2SMQMQ4l8yHY2MAjjnHrnVUMs3Ne3Vofj9z876aGV+nZQreYqGYkZO3tj45wdfUL+Toxjoy+CSffTDdFu1V8ltqjLExQsjRkj2YEH/PSSjyVEPZDTZMmR30zA7RSwQv0Ot1q8fzKYiOEBSAsQGOPntzpdWWbo45XqFqWIbdk5JPvojsrRP0/Smuv1upgMmapijA/NwP8AXS5XUJP8MVn6eXKimoq9ZZrfJWHzeSGXgMT/AIcnvznGvmTy0tHN4vtjbeHaYzPTwwv5n3iU5AGcZ5wR3/LWf1Z3aHpLsdNbKoz+LBBCsO3zSyTevGAvHJ507yzq2R30SwdLVC0c1VJWNKxA2iAY2jnPH+LSc5ONqxlFrtjpLXSUgWWsrJYaeI7fEYDliRxx+Z1DbfwwquwgsMEbGShpyoxsBcECTGCScj09vnVigntIHJ9Ir1LVbQJJLFCC5Xw8FWJA7/qefz0KD+UGwW6XNYixkiigmyWQEbmYeinHbHP6eujg18ApSXyR0tDdagxxNHulU/jWQt5cHAxjBPPP56ZQ+5FyIqy3VaHwYqmONWUlt5JOCM7c44Bz/fVlUtCt/FlZbQgpyiyRRSiA7ZHkyc9uMfPA0ia+RqjWmT0UteyeC1TDLhmbam3zeu3Pr6aVZH0OoqS7LyzVtFDUBakUgb/hgjxCrZyNpA9j8entqeTSJ4xXbJY+oEmjKTXGqmkTJxToCZGHbt79s++p5b2wuGq2T0/VPhQwmn+9nLwEssp845I28+40ynXQc4/CB9XeiNjTUjThgWXMvmQE/gwDkkc+mlcmuwWSN9FeY0Na7h7fDTKCpiKucc8e/PHv7ai03sHkV/pQjXb7SRIaVoAjkqYyBltvfv3/AE7HTcvsR6rRVa/Qxsjh4QVJxG0XK+xznBHftzpr3ZDzO7RnHo46BGjnrEpNybYDISXdvYD2+dWtpbirKq+7KU1DUWqOMlpSZCdrNDvDAg55xnHfV0HDJt1YrTRPbaSzzU32mVR2XAVkbbn/AJsH1799W+/4IXHonlpzWNDJTV0cFOn4o5GIIA4yAPXj11FvprYVex1XSGaSBnr08KYgghAdo7AbvTOiLu67Br8k7RGVEWIwijOPE8UElyOwyTx+nvqV10AsET1cYMcE6OWKjcCFAHb41S8sY6SJ7I46KpiqcNJCC2d0fiYRiBgMQexGp9V1bQUwjNQziKMK0lRG2DviwM5x2IPProWZSJ4tHqmnr5hGscTblIKRtL6cj3+O2m9RS0HFkNXRjape5SQzgEOisvmG4Dse3ccaFkSIop11tnVoYnhqKieVmVEwDCW9GY8HGO/pqxtP5FcWOslsENcs0tREniIY8FmPm/6eeBj/AC1WmmTFUP8AsLU9eJpLf9xLEi7qnLDtk9z7DH59tU96UhrX2DMdZRVcMqwhI/B25dAAFB7DHbucatUdUHfQPusF7rxTpSoxpQzKpaYIsgz6DGTjn40jUIg1J6NXRNNDQoI0jjlRdz71ypHrj37HWGpN6RbtI9FNUzESTFUC5KllAQ/p3xrTGD7pC22UbvdryvgNTxwrS5KbgRs+O3fjJ76aUIr+UG5fDPUldcE2fcNSKzEEFF3sMDtn0yNLGEFK2iLlWyC72mok80lVVxrvzG9KuFfupHPrgj++m4xXRDT/AJipUVktnhempaVyzkx+L+JlK8+Ze3b10cE++xba6JqW6eD4LGNoS3meF1wMggD885PbtjUenFURyY633hLjJU+BAlRHMT5aaFnJP4WHrz8DjjTe2DuyU3ekfn7QUzQR1lE4LeE0kbD1ypK8j9NfQoyuKZ2IbRm7pQLSGJ1PLjJX21Yn8MJLQOfToVBnpi0tc6h/RVxk40r+w6VnYfqBcY7V0xRUQiMRWnwDxkD8QGR376UdnCZ2LSEk5Onj0Uo1X0jtMl8+qHSlDHgNLc6fJbsAJAxJ/QHWXzJ+ngnL8MWfWj9G7d1Nc65pnqEgaQTGQS08flOXPYf9I/TGvm0pv+VHOSk+whcbnUilCU0NHBKwU5l2k9wfIBx2J478ai210Nxl9yrb7zcDOkEsIqwWbaVAjXBOck8euP8ATUJ7pjRjMjl6tvH27JaLdFQUxwC0i8oDwW98d+2mUqeuhXGfRm7TDdKW4NVy+DJUSbWKGUtHIwJOSO6k5HPrwdK5bVFcYO+zQxXavmgqYg6NU+L4eAGbBJIYA9iQeAe2BpvUklQ7inpMhpLHT1HjIz1QChoi5Y4Zly3Kg8HJI454OotXTCollqaeWB4nYvCAZY3YA4OMnnvnOMfnpeaaqxklRcr5Ki3JTxPU+HPtQqceZRjk8DkkHP76Z5eOrIaX2A9ZDI9SSZjUrHM+JAVCs23lV9SePXHfSuXLTkRqMv2PV9HHSo1WkLmWbDxqj5LkAZ8v5t/Y6rfHsZzW+KEoYGlp56qNQzwYCsWMZc+m1exzo9vwKn90GDQyuyh3jpYni3LEV3M7gZz3+CAfnTNJ/IXsHxWmC03BMicRsviNKF2n8P5kA55A9ce+pqKaTYnT0WKrp2BaNJF3VIkAmXwQSZyQxH5eUE9sDHPPGra4pJBSaszNPcciCna0uhR94qANzO4yMNg4APAz+mqvUdFad/BeobSyVFO8c0UYfa4iY7myMEZHYYwQV9e/IPDJukxunRfgprbPdab7QsXFQyyTnMpHOMBeCORnJzjPxo5L4BO6oE3mxUQrHVKj+oTuQI6kK4CkD2IPPrx+uhykhWldsor4yVC1TUHnXgsz8pgH8IwfTXTlLFJcS/aekLSdQFqaTitYu+I8RNgkjy4OOP8ATnVUljTXHRKtptga5q9VNGYo5aOaMAM1U4WPGfcc5znjTRaivuVtX+C5Q9DUtfLHVXWtoRUB
AkLwSv5j34zjntx86SWWfUYsZY18s0NTZ6Opjgp5qiCrKkAO8O/H+ncazLknpVZY66bKDUNmaZYKKGGVmYLsWRdgIH7j8vnV75xWxFxukXbTXJGJYLlVUjqSBHBCwGVBxnBPPOklye0i2Cj/ADOya5UEW9Y4I4IYySvihPKV9D78HHOqovbjQsvwiG1U7NHHBQ/bTJ5pVEY3KQO4Ud/XVc21KmRG30etVuuDNNDK0cEbKeHpmQAk8+v+WhyraLFB3Vl006W8Z+8oKeGoABxGFIfGDnPJwM/nn408ZUvcwpN1Amho5HRpXrmO8RlSFEgVQSCRnkE6l51+43p/LKi9MmFJvtg1RMT4gE5yA2MkAe3wNQ/IkR6dfp2MutslU+BUKjp5VkGd4kYtgLjOcD079tVrPKwlBLthmls9FRTxyRW8SNLwJjHgQsqgA7e3fHHzq5Zl0HGukQVIq2gElDHGrFDIJpwd0Skkk4Hc+Uj9dVuaohQcuiKqe6NSeE1XBHONkvirwVUAjJB9yc6olla+RvTcl2RS28F3guFW1R4SL4RTMbMCAAfbOc/nnTKUm+2PPHGK2LcOnrYUqYo1lkdvMtMjswVfQbfQ8kfqdXcp1oqpXpFqh6dpVoyWppkVU2lWbcFiLAjOT3HPzjRBTe2K+PTLdRa7VR4r5zERFgwkzEkEtjIGeB6Y07T3QrcY+5k1oqbNUTb6SMRl4/FCzAh2Hf8AF6jgcj1Ok0ldhGcbtII23+TyVkMUEcTq5kIaVdw3bsEKPcZz+mq+Sv7j80ukS0RipqpHNTDHIZFmSIQ+GPDO08MOM8Y7/n306aa0Lzm+j8+uounvs/rD1vbfKEhuNYyA8AozM68D/pYa+j+JPnghJfZG/E1Vs5Lf3/3xhggKcAEYOtcdseX2Be7OrBDoP0igFVeBTkBvEOMEZzpJFsA79aaqMVngwnC5C7QQQMDAOe+eP7aUmb0cjkOXY+mdWrSKUar6W9Q1fR/XVnv9LTTVIt1QssscB2s0ZyrgE8AlSRzrJ5WH18MsV02iJRclSPujoH609M/U24vS0klXa62MAx09xUQzy787/DZSVblVyM9ifnXgfJ8LyvFjymk190ZJYpRdyOlyWg0Er/78k0vhBY12ZyQOVOO59j7d8a5ybI5NPTKtPRVMluqJZpzPIhVWhKhZW8rZC5OOCuDjgfro91P7C8pL9wdRU1VMu2qSWmkaFfHhaMgt58NggkhgQWx27AaT3XQluT9xO/SbVE/itUPHUmdY4zEV2grnZtHrlf7g6FGX9R2kyzIqWqnkd5UnfxFCR1B2huQe49iOQRzg99QlJPYWkrstz9QCnuEkVldW8CISF48SbSx3EbW7kZBHuGGiUG+mK2u0D46uqkqKhGil2O6pnZhUcHduJAGcgggfp6aWOFK/uRZLtkq7iiIrrIqYWokQhi4IG5QeR5j3Pr2B1coe6kS6ZCsbVEEkkQRckYZsRtK2TyCBhScqM8gADUpKW0hf3C1s6engnDVMIqPAj34chlaLeSVOO34cnHx20yxlkXx6IKGwRSfcr4zSRxN4khE5TJUMVKkjOCMZAzk+2pUE99ix+5PSXCOJoRUTwJTRT52xLklyASN2RxwPUjk+/LRlS0RyKlyrIqCnZVkU/wBZ0JlzhcZZJEyOAxzgc++ANDVOxrpdAynvr3aeaWGAUlvgqI4IVpsMXQMCxVQO59eRwTpXKLlsROUv2Lc7Us9wqlroHlp3m3SRxcleThtwwcKQx/LJ51FLk2y1RT0wZcLhbv6irQxRxtLv3RRsDMpyEBIO4eYHBPyD8S+NdaKqUkD7hfa2Cjjpp6WN0CrMu6HlV5IBHByPMSG9BnnVijUdkOTqkiGjqqmuvk7pCimJWDx1MSsAM5OTx3UrjHfGT6ae+nREYylJ0gJaaq6x1SwVUsUr5UGUHBUkcDtgkDJ9tXSikhFKV6J7hHd71QJNSVaw28ytE+47ZSM/OCCcfHY6sgox+BpcpLsrVXT891t0cM1ZURR8kyJIvhspzhuMnPcc/wCR06ywh0heLa2Vem4LZaoJ6Sor6ur3yeGisVDIoxljnJydvcc9xnnUT8mT0tDJxWmGv9ximjgWjp4YZVCCoKuFB9clTx+ZP99UuVvbJdL4KVP0vbIz95TrTLUMzBamGYgh8nOATgntx3AOk5uv1WTa+C7R2SpWKD7mshqZZHy0kNMjEIPUnHcYPAOlnKFaY8Yu9sKVFwp5qWlWeeabwgIm3xBJHIGQSoGMntgazOKk9aLpfYksnUrLSmoitMcbbSkecb93Plwex5APPfViwQXbIjO91sI019uEtZ9qsAO1iGjyAyjBY7dwwTkf3wNPHBBDvLK6SKtyudbZJ1jf7aEht3isxOOSTjj0HI5yfTUT43xSF5OBBLXVaVoqRMKimEG6Jhk5ZivH5gEcDP8AbUrEn2HKd2Om6geSOmqY65aWRP6oSYkFcrtPfk8HOBg5HbVM8cbST2K5Nr7D6eGuqXEjNHPUruBWLJbjvyO5x+2s0cKcv1DuNKxz22qliAUtLGuHRllwT5u2T7YyffGtKhhxvuxFGVWFaiyStcMhvEMeQZ+TuIzwBxx650c8adFjWyGShKU9LF9v4s1RIzSMqbvDAxkH8zz/AJ6X1IJa7B7dJaM3EldJVKJIGCmpBii8DAZcBQQ3fIAxg8c8adTTrZVx/Afiqam0VBE9LJEylikE4YvuOGWPAODkarc3FlySUt9InobuKmGqKLEGWFPGpwgVkG7zFecYHBx6g50ym6tFU8ik7qiKpoIKGan+9ipjSBzTVFPsDBVYqysT3wGOT+Xtpla7K5NR1IgrJksMDVdHF/SVBE5yN4Ck7dg9RjVdWS58VdGoo71DK8UgoPCURFBIyhAvl/4mOcAkscjj09dSpL4RY3q0gLTXWqllqaaaKWmE7FzG0JV1AI2pHkkMG4OT27451FNaZVzb0fI31Lhmpf4nrulZEwerMBdCOX8SmQD4OeOf117/AOl2/Fjf/Nm7BbWzinX9EKW+TeXaGPb5Hf8Avrrw7Lp/gyzLjVrER076HoI7w9VIhaOEZ49WJCgfH4s5+NUyLoFD6sVyVHVFSsUheOMkkkYOf/eoRXPs59q4g2XTtfW2qgVIfDp4j52mkBySR/21mk7ZohcVo3HRd8kkET09wJKRuk+7CN2wUU/4geODjURWx5vlE7h/C79cqmvv9T091NNJVXCnimqKGrl8zSxImWhYj1VVJB5JGV9teZ+r+HGFeRjVff8A8M5GWKj7kdrrepbfSS22GNmGIwUkB3DDO3kYKOSpwMYzuONeQ5OlRl5paLs3VcdMqLtnqHkHiI+8MIgSRz2xt2A459f1e38BzaIJ7mlyt0ckNXLRSPG24HD8IfJIq8fIwRkZ9ckaW7qiFO+mQ008MaSpTl6vuZZanc5jyMbccEdxzgYY6ja/SF6pAuqvbUdQ5VWeSdRO6NT4Ktyq5PB8TyMcdvw++nqSXIRt/Yv1fVV1ilqoBEGramiplMaqzeICSQxBGQT+EcDt69zbbb0x7auI49R
XGOelqvt0SraGVgrNjBUZfc2fO5Gdo4OVwOcalKVp/JPufwQx9U1NPDGU8Skkklkp2+6TCxAc8t6EenAyMc+UjQnT0LbrZbqOq4LfRsyuZwsZELR8PJgqQhyexf0GOx582nu+h7SWi9Jc4XSAMWjhlG+OaOTJj7Fdhzgc7hg57HPbUppJJdEt0tjqkJU2irLCJYnZHbEoKrhskjI7ttxj3ORwDpWlv8kumijWw2+uudwjmllFbSMojmGVVgy+bfjOMgbiO2Memh1Em1K2+wZb6m3x1lMlBVxoIF8OFXOAzhcsrMAcldzMM9sDnjSvvRMZpJV8Aq63uFac0lLLIktLI7yrIXRk8vlUE5BGNpz2O4jSvpRKnP7F6juE8yf0p2paR/6s6Rxf1F3eVmU9nG3B5x685GrIyuI8Zu/bovXlaSG1ikhnWaqbEdQ4BE8mZOURgxO3Z2APfA/xcu3aqx24rSYKqbY0T0yW9Enm3Nslgm3QlEYFmYE85yo24zhiOwI0L7IRNPfyZ6hudJTrFWSU0u9iFd8YZDggkKTnIwOf8tbHhf3M0eK2wrLVp92vjP8A7v4B3IoXO5iSqkgHcMHn3I0jwvasstNlKoW+28UskUaVgVHWommCALHgEY9Sw4P644zqPTjBXIsim0WaezV9ZBSz1gp5aZ/6bNG4ZwByGDY8vdQPYZ0nGBHf6iOWB6dfEpw9XNBKi+GsR2lcluPTGTwO3cZGk9JO9kct2EbQsNe0VJNRpvd3ljaZshGB2sufyAPvn8tVvEl8jxlH9NBCkoamSthkR6eeRISqRwxFccMFznAPmI79z+Wl4Rf6iyMmpqS7Bd3pKyrkEXjeK8U4CtSN5yCFIz2GAePTHroc1BUkK3ObbsuRW6mVZFlWqSF2MGHTkqD+Idzw3xyNUyzSWqFUb7GVMtDS3YBDWJEIiN0UgyhAGwc/OBn5Gro5JvYsmrpBGGkhhmLS1zTyzIc75AwYlgV5x6c86JbQ6mrthyntUMEInndDSwv4e9pQQxxg5B7Dtz+p1TKUvuXudf0JKaK2M8tTFKplhjEjoIvNsPCjB/8Aq7fHbVXFyfZZHJBe5iM0NsWaS3JIJ1KNvKllOQc4PGMeuMj350Si/wCUJTg0uEa/qV6KrmiaaamMrqfMSyeKoBH+H1Xk8988aX0/ixF25JaJpmmd6F8ssaMxnk8QKrHO1V2nuR8dv11KxxSoi2tsrWirmo95qZhTU0hdA04/4m4YAxge/Ykf6asgoqylSaZoPtpbTFM9dU/cmRMhCwLplSA49wBt5xnjRUE26GcpK02VaWirULpb6xp7gp8WMTtiMyMoXDKScH4z2I1Mcjb0iv3dJgKrtV0prvJ4kC4LnwmOPOQAxwM8j0wfQ8aqcpPQrhK9ktZSVVdao5JpVC1DKiqwG4puAYsOwGAcE9+NLylpN9jOMpIsrSxRI9E9AphhqZTFVSqVUSEgAsPTdg9h659dWKUv0shwpN1pFitqKG2wzF4C7xESQSzKMny5KBc5H+uRwNaoR5dA/bplqm6zhqKhq+novvqKnMZ/mcJHi+JMWUIFb/GuxSVb/mGfbWmWNpkKS/Uj47+u033P8SD1YXz1FNSSZSMpkhGUEDP/AEjtr2H0u149P4bN+B3E4j9TqoVd8ebZsMztLjHI3HOP3zrrrs0TsxjHJ1aIjvP0Boo6fpu518kZfPiKO2MKpb19yAP11XLsujpHKesZvFuNQ/8Ailct+51H8xXIzIGTjVpBrLlVyTWxN7vFEQEWMD0UAAnWP5NMn7QBRXKotdSJIHK4PI9DrTSaM6dG1k6take3dS2moFHdKWTDIG53Ae3cqwJB+CdUZcUcsHjmrTKppSVP5Pv3pm60fUdits9Mnh1VTTx1aRqyeHDwGeM5GAQpIwcHj37/AC2WLhNwXw6ObSsrXGoo7vFWSUcYoJpqaMpWSZDKpCMY2AUgsBk7uxxgeumnxq2K+LutDaKkjjqzWlnFU8vh/wC94WHaPKCE5UBj5s+4IOhKnpDpOtHqgzWmlMq1BTxqYBWpzkMUyHLE/wCJgRuX3zzqV1ZDuK7NF/KLeorpJ1BS4p4aTpHtcjKkMCTliSGHBwQMfOnSTLUluP3EunUENPQyXGWjEvjTxO9SyMoJRNqKG7AKAAO2D+ei3XIS/bbM/wBY9YzUsx+4p4FjmhUyRRkHAUkJhlJy21sn1BB00rb2S246aKNhuFesTVlVaI1p1lEVS4A/psrqQACeDjcfTPPqRpWqToWKbdgeLqGK5GOkjgjKQOXeZYzIzBnU8tnPBKsM8AMRyGxqab3ITekkOk6/ax3Sq2QtSwyybzTE71RkJU9iQcFxzxwcn21ZV7XRLddBvp7rApMtLX2xkt0pZTP4PEjFgrFc5DKNnbA5LD3w2ki6FP8AXqxl1usVwrY0ppI456iQLN9yzRjwwCqecHKtsJIBOcce2am72yttaSK1NBNZKqGnlWCqNEq+OYWyGdNxDIisrEDlCCeckkgkDS8Y6SZLqPeyG41M8i1tXNK1dEJKWKohEh8VQ+5ViYHkjO3B5wMZzp+DmrYklLbRWrbHU0U9RR1CrR1lHs8SJEcqmByBgNj8ZJGOdhA+Vlj7sFF3xa2i5HDW2m6rLUVj0vjeItLLBAHNQOSduWXCjLc+hX2AGoim9seuLt6KUtNXWUMTdvGM8s6BJYtkjou0+UenCnuMZXHPOrf5dCuHF6Yi/SO5NQzGlu7HfKZTNJmTzAE54IAHJP6+urP4p/YFjddlr/YKtqKygepun41DxmAsGlKAhSoPBOTnLH31S8/dF3F65GmnLWe2vb6GeI3GRWkj+6cFHIwAhwcDJ9+/zgaSMlN02WOTjFpdkLVbVtNTQTMIUl3B6dYVzCMLvYEYzzvwDnHv21MeEXa2UuTlHegbU2O7ymKl+5SaUozJIh24CncreGCN3Kg+ncn01e86T0R6erbtlO1fTquiZJmuMzMzJIKKEB2EjY3YZccDk49z+eq5+SpaSF9L+Zhe09O3Sghlmp6ueSBclKd14ZmdicnuMe34fXWaU5S6LIQk02HLXYrsFnqEKVDBMS7lCBVI4JxghsnHqO+kak9seONp7ZFNavsbxunNcjSohMZkLLkITyMHGcEEnGONQm7GlipsZZbaV/3pKKFqWpdowsco3BN3LbP+kKT+f76hN9sq4fNF+ahtEEtGKSNx4O9YZWB24LHOPXJyBgju2jne2PwgnXRRpKVHiqzNSCppzGBPChaNWcjse53BQe3GONQt9oEqttHv5TMsNwuZqJ6arkP9MSMG8ikHw2Y8jAyvHoT20vuq2S4pXo0NnpjE1K8lZElEsDCZ0my8gOwj0zkZPIz21eptRT/BcljVJ9CQXBfvQlOZoxEqIiSAuQCTsGfThuR6HPoNVuUm9FTkk/b0NS8COc0dRDBLFE2FqmAXYWwqKoODg7f1yP0TbCOZR1JWStW0EZnrKh/GWF38SJiWICd2XcOeQME59vXluHL5E5xJmro5KOBY5aaes8VY1qXCqs6bdwBByOQTzgY9Ox07hatA8i
ZVF0p6edJKeSDxSx2mqI4Tnedy9xnAye5YdhjUrW0Q5K1QNul2mrKNXjq4UqlfKFGJWNzxgk/4RvPJ5GMd9R7dEuXH9x9NV+DUJFLUQqYZhmJVUxTAY2DGcgYznnnPtqmldsrjN3VhhIFv0HkjxEZfGepghZknK5b1OSQCCWIGCfQasX4LHb0Zqt6dp6nqOJ5JTUNCBUy0u8rEsqICMjOfnAJB2Edzq6OVx6KZNOWxgtCUdpDvUNU0s7rVlYiWUuVHIJ5A8jZHcHVj8iT0kJXyfNH19udsP146er6WRmoza4GaYMJPE2yzjcMfGBhueNe3+kycsLtfP/o3+N+g4Z9Y6V6XrKrJj2QSu0kRC7QVJzwPTknXbj2a2YQ6sFPof6bSrZPorUVEkTP9zHUSEMDgAOFU/nnP7aT+Ys/lOK1im6V8jjJ8xOTqtPbKJPYFlj8E4PfVydjJlo3OaaGOJyXVBgar4Ifk6pjDEZYyyqWwD29NMlRW+zR/Svoap+pHXdo6fpkZlq5h47KQNkK+aRsngYUHGfUgeusnm5142CWT7dfuRLrR+kNmiFtsj26iZKUmmjiqYYYiZCI32sgyMnDMMtgDGMZwM/M5ZG+TXbKJY+Cr9rGW+gaiqYI45xVUklUJnWPa6NKpZipwvoWwzDyqCBwe0ObbSZU4xvTtFi20FOTBOJ3BhladJ6mVVJOWcHkchcc4AzsPzqIummittrTEWrt1z6fQUkKUrwSlpsjxZ5vMQ0pj9FIYY4Awc8+kuSUaQ0pKkqK0lxNTaanwKKaRkVFjkZy/26+JnaGPI5IAx8986nkPHJF2qMcJ7pT2+qgrLdPRV0TytVQ1BKqVEnDKuQCRyxIOMFTyDzKnUmjPJSS2thSngrXuNM1SPGq4acMtVVMT4kGSdxhBwSykgkE44JGew8qpJl6vlUt/n/6LFZZqOzQ/cwyrVUldJG8dC0DEKVZ+FYZO3AGCx5yRjOoUl2iOMYbStMGUXR1NVvVNbqlaWrgK10dLEpMbhQECyKo353kjIP8AiORjV3NtUxZQ4Sav/wBFe4dNVN7mejpWjirZoWysXkRXRHDYIGGGfNuDEkpgnvpVJfKGnFaUfsVZemb1090hK11g+6q6iMbac4ZoMEuXI/Cq7XO7sc4bupwclJ2Vu1DYbounIFjoIqtIzJRA0pq5gSpjYKOB68uAG7ggD14RzkuyUuJahoYEr1uFYYlmSfwn8WCVI+EZtrJGc7Tx5fcdwMk2RkpbfwRBxVuTBV96ZuS1VRHLV0dLTVyRkUyAiKNkHDZbzAjJbJOQD+gZ5F1QdJ7pMqW+0rFTss9bJEsammZ6ZkAdwrAtICCMbWZgex+OQa+e9ohOK22Vz0jUwVtWlzlplnaF6emioCC0khZcb3YEjuCoHPAAPfULJUqJjFX7zVWiO81tE86ypUU1CBEZ6qAIojLhCgLAMi4ODg+UkHGWJI253rolJ8aroo2XpqvL1c0VzId8GSIuSmzJ4CgYBxu5HfAz8zxbHcGlprZHU2SpoqmRqioSnpCSsccgKMVjcMSp77Tx+mdLKPEoabe2eNzdHp6eqrNjT+aCqKhC644I7kKxyoLc8caFFIbnWmy9R3S2TWyQxVMlYGkcpI/lUgKMI3HP4kHH6kaGm9UOpKO47QWN7akFQZad7ZFFkL4kRBA8PJQ4znBOD6e2fWXF29dFinGq6LcVXT0sJq6hmNFMBJHHBDlg+89242j8IORwM6VRGi0k4y0CKeWVBXSGriEhmRQiNlVww/Ao/ESuSADjI7gnUrHoqjJ7bYR+8jlVblRwlxjzF12MUx+IoTz+Eep/EDqXCMfkdzctxRVpLzcbdC1U7NRbl8Yqq4bzMoQs3HcHLDjGCffQo0yFlabvX/PkvXKWooEUoEuslNJiWeNhtJJG9yO7KQV98fmNVSS67JnJrp2/+WCqk19wWnlZB99EpqIqWQFhjLDeP8KMrEDn/pPfUJ0ij3S2uynVXq8Wint0c/2YmiKxyPLKo8QknerE8EgEY9eQPbQnugfNRNK1TLcaSh8STw6MMqicjEZl2lidg82MbPMRjOO+caJbVl0YOVNPX5BX3NeKdBT0sCQyzMwqERn3up/qEFR+I5jwoGd3fAzqKuNGZ8q/BLIl9+0pIoI6qlihLlg4wrYY7sE/hYjb+PHOPQnUJUQ1k/SxzR1rVcciUUkVXVTGGKEt4aKCu5SS3/DKsdufTAz76ZtA4uwNR014t9w+zqHaqp6ibKsF3EAHGO/DbRjGfNkd8cNyi1Qm1pjp7JV3bxKa21aMJ9sdME/pspUZIDE4GA/pknGptJJGiONTbaLsNNLb4H+4rIJlCGCoRAyhV2AMVxnA57dsEYIJA1XbT6I41dgaC62+qvJt1WslR4rpMKt9qxJTugaNix5K72BI74Jzpl1snlBy4ysOQ3GjoqKtlheGlNPEIGZHWREaRWQOOCP6nmOMkBSQTqFFPsOSi2ooLWCpENNQtLUxxVLrIfCL4lMW8kFhuyASGzt4A2YGdQ2ouwu/dJgqO63K59OSRvUTXJIYv6EJV0WTzltiscFSSCcN79zjSW5Kv7EqMpx70jBfW/p2sv308+0tM09FdrksLwUdNucTyDGYmYgeHktj/wCXBA11Ppko+N5Mck1a/wBrGUEpJnx9QW+5UnVi0F0pKmiuUIaCWlqIisqNyNpU4IPOvo8Zqfuj0b4jvqp1L/tNDQRvCFqrZmmMgGCyHsCMf4Sp/wD5aeOmWM5/S0zVlXDTxlQ8jqi7jgZJxyfbVgqPpDrOWKyfSWltFvn8Snp6eNfE/CJPOMtj5O4/qdVFj6OD0FQI2qV3BWI4/vpV0Z5dguuk8aY8fGdWxVDpleMEkAalkktTKJFQKoG0Yzjk/noFR9jfwwdAVHQXSJvtTRU81wviQzhKiD+rFTK7MixPzgvty3ByNo7jXgPrHm+tm9Ffpj/uZczknTWj6QTqG31FPQLWVDtHWSeA4pwR4jK2SpLd48bUzjGR25OuBapkReN/qYRo79SpBVTMI6iseVo6SmwFEYLmNSPZSnt2HfB0l1ciOS2k9lbxaeW+fbwUSyRvC0QiAXb4rAuQGA4JY4I4yQfy017ByUmuX7FSmulr3wRw14oo1/pYeRTKz7ihBXPcEldw4GQM88rfFfgXlBK0Ca69xUNZBDS1U1N4NQROlwwJqdQ22MAABRtJOffd6ehbk6Dko/pdAug60pembtRAyLRRTnEk0Gz7gw4LMqlshQrc4OcB8cjVkbvk/wD7EWVJ02G5qteoaqOWnj8H7pYQbpPIkMEAcjcT5QUXarDb5gSWxgnUcW3rs1Qx+q16b/dvpf8AEV7lTyUdXPEkxuL0KPEpps+dthG0qTwvD9vNkg41CTToplUG920SUXUkUMkdYq+BTTeFG5nUKsS4UY3N5iCQxPIHl0K3tA8vyxt8uUhEc1ekTRmF5YZvEEayEYz+HDAEtu3A+wxgasVvQSyNpJlT717jQkTk25PuWRVBaVVKliUI/EjbTkHJGJFz30/FxVvootVvpgyo6jkr5YqVzT0UT
FZkpQRKhhfdh+cE8gKwA3D99TUUm/gr58k0BJqqsa3yABp68Sosu2RAsvh8l93GDghdh7n5wNTFR0kxU29FG39dVEU9ZW1dNJO1ukWiaJ0dGkVQSQygDJLeGMjuAD+Wt8Za+5Cvv7E0VfT1taY2ma2RVcTVMKrTl0kUu6qEAUjauxwAwxknjHOqePBc3sEiLqTqW61ktBUyyvCI4F+3eJcy7RtRY4wMBlTBOBliM+x1bGEZK2uhpSdprQKg6kqpLMixwstwiZZHjkVlSONnyzqSeQ53AAcnbk99R6cVO5dD+p7aR06MmOzRxg+WeKmaTPJYswDc9xkemoilxTC2kQXKslmqbVE7Ax7VO3aOc7Qc++R31SNP4AF8maS1XGuO37tZZVWVVAKhVQqBjsAWPHbnSxb5Mpi7WxWqpbZSUstOwV2eSlJZQ48LxIG24bIHIGff11bD5J5NUaitkeSutoeR3D1sULbmJyhiLFfy3AH89VXuh70v3CCu089bRSMXp4HimjUnkOxG4lu57DueNUrSNsttr7BeOxUNvqGqaeARTRSBkYMcAqCBxnHYnVsSlRVKXyCK2sniu9FRpKy0ssdVM8QPlLqi7T8YyRx6caSbdsq+UWavFvs9H4CqglmdHUqGDKAAAQcj1Ok+Ea+MVeiFZGpGgSE+Gi1CuEH4cgEA47HgDU223f3Kcy4uNAmluFRJ0fHUGU+OtLvEgwCCYVJP7k6T+aitusaohepcXKgYrG7GOKUl41bztuVjyO5CqD+Q1EtOkK31/wA+TS9eRJa7taKGlURUlZZYJ54gMhnxKc89uQO2NW5Ek4/sWZUo1X2KtLXTzWiljaVgk00iuqeUYCsRjHblR2+fc6XtbLZvpGotlXMOmKmfxXMqxMcsc7s7gdwPDdh3zqY/pY8G3bZV6jY01HS+ETH4sbo4XjKsEJH5ZZj+ukm6qinJpaMp1ZcaihuwFNJ4Aan7RqFxhEIxjtyc5H+mkW1K/gryNpJoOyUMFPXQiKMRrIEjZU4BDFg3Hucd+/f3OrJa/wBC1rdDlt1Lc56CKqp45ozUxQlWUYZGqY0ZT7gqcHPfj2Gox+6aT61/uDSdX/zoA/yO3ia4VApIhLRSQrT+XyxjtgL2xjjt2AHYDTxbblZTJVkdfBeprbTSmxzNCpkaOWoJ/wD+m3v/AP3fHtuOqZajaLEk2Zjp6ulrerOo6ecRzRW2RDSB4lJh7EgHGcHJyDwfXOkxJbRUm3Jr7BC2SPW2CPx2aQTCBpBnAberM2R7Ejt2HYcHVjSUnRfSor9PFq21T3Cd3lraeV6WOdmO7wvFKbD/AMw2qBzn37nVl0gTcoW/g4h9RrZSP/EBe1+2jVaS2UIgCKFEfl9Ma999G34ab+7N8VT0c++sEEQv3RqiJFDyKHKqAX83qfX9ddef6ZFmTo4Tav8A9yp//kNaJCHe/qPRwwdDRiNAoT7ZRg+m0/vpEO+jhCsRcEOecZ/sdQv0lUuhk/cL6Hvq0SPZZsKg1kGRz9xDz/8AXqqb0/2Y0ixLSQyUdxmZAZEmgVW9gwfcP1wNKpOl+zIT0fav0uq5bv1BTU9U2+np/DihiUBFjTew2qFwAMe3bn31828mK5dfJhUm5bD3Vt5rbf0uJaepeOT/AHtd2c8JN5Rz7bj/AG9hrn8U5JMrcmmi3WVMorLQodgrxSEqDgZAcjA9MH20JKhLdo0V4LQdA9OTxO8Uwngh8SNyrbZHIfJHckevf50vyzRFKUVf4K0FtpqyGVZ4vFAihrV3knbNJtV3X2yD6cZ57860S6KeKfZmeqK6apmqhK4f7aBxESoyoJ5GfnA76r4pWTlSVJEt3kFd1Tbqeojhlp1EkIhaFSm0RRgArjBxnv3/AG1fHpv7f+h+8iv/AJoqV5JnraFiXpYKl0jjc7go3n3/ACBz3zz31kk3yQj/AEJjup6+otHWfUX2spQvAszF/Pl3L7mO7PPYj2IBGCBrTxT7XZTLTdHhcamvmv8AFUTNIlPbp3jzwQVeoVeRycBVAz7aIpKaS6/+hU7exnWk0lntUH2jtEC6uQWLAnCHJBz6sf31biWkvyPLVotQTyNEFLsRKZ3fJ/ESxXn9P/ONO4qmKtxojvMrW2wyU9MfBjp6pfDKjzgF1BG7uQQzcE45z31GJJt2Wv29Fe0VU1S4qJJGad2pEaTOCQ1SgYZ+QNUQ/XXx/wDpEFfJsm6NxUXqISgS/wC6Qy5cbiWeVUcknvlQBj4Htplp0hcbfQL6oLJCjB3ylX4S5YkKqNMFAHpgHHGrl+toj4o0NRxDbKXvAq0+1W5I8pHB75x66y49yf8AUef6qKkltpoXDRx+G009RHIyEglROQFyOw+Nbsi/6jXwWwS5L+p//9k=", @@ -674,12 +692,12 @@ { "data": { "text/plain": [ - "[{'score': 0.4759259521961212, 'label': 'flat-coated retriever'},\n", - " {'score': 0.10909564793109894, 'label': 'Labrador retriever'},\n", - " {'score': 0.08196048438549042, 'label': 'Great Dane'}]" + "[{'score': 0.4759257435798645, 'label': 'flat-coated retriever'},\n", + " {'score': 0.10909581184387207, 'label': 'Labrador retriever'},\n", + " {'score': 0.0819605216383934, 'label': 'Great Dane'}]" ] }, - "execution_count": 13, + "execution_count": 15, "metadata": {}, "output_type": "execute_result" } @@ -716,33 +734,33 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 16, "id": "1cd9423b-3cba-4cf9-b58a-bffbde08738d", "metadata": { - "execution": { - "iopub.execute_input": "2023-03-12T19:47:48.888614Z", - "iopub.status.busy": "2023-03-12T19:47:48.888417Z", - "iopub.status.idle": "2023-03-12T19:47:52.868441Z", - "shell.execute_reply": "2023-03-12T19:47:52.867833Z", - "shell.execute_reply.started": "2023-03-12T19:47:48.888595Z" - }, "tags": [] }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Compiling the model to CPU ...\n" + ] + }, { 
"data": { "text/plain": [ - "[{'score': 0.0960492491722107,\n", + "[{'score': 0.09604837000370026,\n", " 'token': 4827,\n", " 'token_str': 'fashion',\n", " 'sequence': 'i am a fashion model'},\n", - " {'score': 0.09326528012752533,\n", + " {'score': 0.09326566755771637,\n", " 'token': 2535,\n", " 'token_str': 'role',\n", " 'sequence': 'i am a role model'}]" ] }, - "execution_count": 14, + "execution_count": 16, "metadata": {}, "output_type": "execute_result" } @@ -789,29 +807,29 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 17, "id": "5d9cedee-902c-41b2-b6ef-1c04bbd8498e", "metadata": { - "execution": { - "iopub.execute_input": "2023-03-12T19:47:52.869399Z", - "iopub.status.busy": "2023-03-12T19:47:52.869151Z", - "iopub.status.idle": "2023-03-12T19:47:53.395475Z", - "shell.execute_reply": "2023-03-12T19:47:53.394997Z", - "shell.execute_reply.started": "2023-03-12T19:47:52.869376Z" - }, "tags": [] }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Compiling the model to CPU ...\n" + ] + }, { "data": { "text/plain": [ - "{'score': 0.5305245518684387,\n", + "{'score': 0.5305243730545044,\n", " 'start': 12,\n", " 'end': 75,\n", " 'answer': 'an open source toolkit for deep learning inference optimization'}" ] }, - "execution_count": 15, + "execution_count": 17, "metadata": {}, "output_type": "execute_result" } @@ -845,26 +863,28 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 18, "id": "755096b0-406d-4167-8642-5b4b093e2bc5", "metadata": { - "execution": { - "iopub.execute_input": "2023-03-12T19:47:53.396122Z", - "iopub.status.busy": "2023-03-12T19:47:53.395987Z", - "iopub.status.idle": "2023-03-12T19:48:00.052492Z", - "shell.execute_reply": "2023-03-12T19:48:00.052125Z", - "shell.execute_reply.started": "2023-03-12T19:47:53.396109Z" - }, "tags": [] }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Compiling the encoder to CPU ...\n", + "Compiling the decoder to CPU ...\n", + "Compiling the decoder to CPU ...\n" + ] + }, { "data": { "text/plain": [ "'Das Haus ist wunderbar.'" ] }, - "execution_count": 16, + "execution_count": 18, "metadata": {}, "output_type": "execute_result" } @@ -884,16 +904,9 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 19, "id": "c0f096f2-0cd3-40ab-9661-1c7209c6a670", "metadata": { - "execution": { - "iopub.execute_input": "2023-03-12T19:48:00.053145Z", - "iopub.status.busy": "2023-03-12T19:48:00.052965Z", - "iopub.status.idle": "2023-03-12T19:48:00.056985Z", - "shell.execute_reply": "2023-03-12T19:48:00.056530Z", - "shell.execute_reply.started": "2023-03-12T19:48:00.053133Z" - }, "tags": [] }, "outputs": [ @@ -906,7 +919,7 @@ " 'translation_en_to_ro']" ] }, - "execution_count": 17, + "execution_count": 19, "metadata": {}, "output_type": "execute_result" } @@ -918,16 +931,9 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 20, "id": "ebf97e9c-d1e1-4908-8a3a-bad24ee3a34f", "metadata": { - "execution": { - "iopub.execute_input": "2023-03-12T19:48:00.057929Z", - "iopub.status.busy": "2023-03-12T19:48:00.057694Z", - "iopub.status.idle": "2023-03-12T19:48:00.399891Z", - "shell.execute_reply": "2023-03-12T19:48:00.399417Z", - "shell.execute_reply.started": "2023-03-12T19:48:00.057914Z" - }, "tags": [] }, "outputs": [ @@ -937,7 +943,7 @@ "[{'translation_text': \"Qu'est-ce qu'un modèle de séquence à séquence?\"}]" ] }, - "execution_count": 18, + "execution_count": 20, "metadata": {}, "output_type": 
"execute_result" } @@ -967,26 +973,26 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 21, "id": "59df852f-e9bb-4f96-91d9-23f4e4577ea0", "metadata": { - "execution": { - "iopub.execute_input": "2023-03-12T19:48:00.400753Z", - "iopub.status.busy": "2023-03-12T19:48:00.400401Z", - "iopub.status.idle": "2023-03-12T19:48:04.459054Z", - "shell.execute_reply": "2023-03-12T19:48:04.458437Z", - "shell.execute_reply.started": "2023-03-12T19:48:00.400729Z" - }, "tags": [] }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Compiling the model to CPU ...\n" + ] + }, { "data": { "text/plain": [ "[{'label': 'nl', 'score': 0.994126558303833}]" ] }, - "execution_count": 19, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } @@ -1018,19 +1024,19 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 22, "id": "87f633c0-3334-4ef0-942a-5bdec807568d", "metadata": { - "execution": { - "iopub.execute_input": "2023-03-12T19:48:04.459956Z", - "iopub.status.busy": "2023-03-12T19:48:04.459670Z", - "iopub.status.idle": "2023-03-12T19:48:07.863986Z", - "shell.execute_reply": "2023-03-12T19:48:07.863615Z", - "shell.execute_reply.started": "2023-03-12T19:48:04.459935Z" - }, "tags": [] }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Compiling the model to CPU ...\n" + ] + }, { "name": "stdout", "output_type": "stream", @@ -1070,6 +1076,14 @@ "- Notebook: [Post Training Quantization of a question-answering model](https://github.com/huggingface/optimum-intel/blob/main/notebooks/openvino/question_answering_quantization.ipynb)\n", "- Examples: [Quantization Aware Training examples](https://github.com/huggingface/optimum-intel/tree/main/examples/openvino)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0606fb02-2367-4573-8461-7fc8c065ece6", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -1088,7 +1102,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.6" + "version": "3.10.12" }, "widgets": { "application/vnd.jupyter.widget-state+json": { diff --git a/notebooks/openvino/question_answering_quantization.ipynb b/notebooks/openvino/question_answering_quantization.ipynb index 782a48ff45..ba4a84ca38 100644 --- a/notebooks/openvino/question_answering_quantization.ipynb +++ b/notebooks/openvino/question_answering_quantization.ipynb @@ -846,9 +846,9 @@ "name": "stdout", "output_type": "stream", "text": [ - "FP32 model size: 436.07 MB\n", - "INT8 model size: 182.41 MB\n", - "INT8 size decrease: 2.39x\n" + "FP32 model size: 436.50 MB\n", + "INT8 model size: 181.84 MB\n", + "INT8 size decrease: 2.4x\n" ] } ], @@ -858,7 +858,7 @@ " Return OpenVINO or PyTorch model size in Mb.\n", " Arguments:\n", " model_folder:\n", - " Directory containing a pytorch_model.bin for a PyTorch model, and an openvino_model.xml/.bin for an OpenVINO model.\n", + " Directory containing a model.safetensors for a PyTorch model, and an openvino_model.xml/.bin for an OpenVINO model.\n", " framework:\n", " Define whether the model is a PyTorch or an OpenVINO model.\n", " \"\"\"\n", @@ -866,7 +866,7 @@ " model_path = Path(model_folder) / \"openvino_model.xml\"\n", " model_size = model_path.stat().st_size + model_path.with_suffix(\".bin\").stat().st_size\n", " elif framework.lower() == \"pytorch\":\n", - " model_path = Path(model_folder) / \"pytorch_model.bin\"\n", + " model_path = Path(model_folder) / \"model.safetensors\"\n", " 
 " model_size = model_path.stat().st_size\n",
 " model_size /= 1000 * 1000\n",
 " return model_size\n",

From 2b186f2703b3f3102c05abe2123f004094d5262e Mon Sep 17 00:00:00 2001
From: Ofir Zafrir 
Date: Fri, 15 Mar 2024 22:11:02 +0200
Subject: [PATCH 8/8] Add Phi-2 on Intel's MTL iGPU demo notebook (#606)

* Add phi-2 notebook

* notebook update

* remove stateful setting use default instead
---
 notebooks/openvino/phi-2_on_mtl.ipynb | 583 ++++++++++++++++++++++++++
 1 file changed, 583 insertions(+)
 create mode 100644 notebooks/openvino/phi-2_on_mtl.ipynb

diff --git a/notebooks/openvino/phi-2_on_mtl.ipynb b/notebooks/openvino/phi-2_on_mtl.ipynb
new file mode 100644
index 0000000000..88f0387f05
--- /dev/null
+++ b/notebooks/openvino/phi-2_on_mtl.ipynb
@@ -0,0 +1,583 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "aeb16663-be53-4260-b62d-44611b6771ec",
+ "metadata": {},
+ "source": [
+ "# Chat and Code with Phi-2 with OpenVINO and 🤗 Optimum on Intel Meteor Lake iGPU\n",
+ "In this notebook we will show how to export Phi-2 and apply 4-bit weight-only quantization to it.\n",
+ "Using the quantized model, we will then generate code completions with the model running on an Intel Meteor Lake iGPU, demonstrating a smooth experience of running GenAI locally on an Intel PC at the start of the AI PC era!\n",
+ "Finally, we will show how to talk with Phi-2 in a chatbot demo running completely locally on your laptop!\n",
+ "\n",
+ "[Phi-2](https://huggingface.co/microsoft/phi-2) is a 2.7 billion-parameter language model trained by Microsoft. In the model's release [blog post](https://www.microsoft.com/en-us/research/blog/phi-2-the-surprising-power-of-small-language-models/), Microsoft states that Phi-2:\n",
+ "> demonstrates outstanding reasoning and language understanding capabilities, showcasing state-of-the-art performance among base language models with less than 13 billion parameters. On complex benchmarks Phi-2 matches or outperforms models up to 25x larger, thanks to new innovations in model scaling and training data curation."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "03cb49cf-bc6f-4702-a61f-227b352404cb",
+ "metadata": {},
+ "source": [
+ "## Install dependencies\n",
+ "Make sure you have the latest GPU drivers installed on your machine: https://docs.openvino.ai/2024/get-started/configurations/configurations-intel-gpu.html.\n",
+ "\n",
+ "We will start by installing the dependencies, which can be done by uncommenting the following cell and running it."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "96d8203c-34c9-41a2-95bd-3891533840a1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# ! pip install optimum[openvino,nncf] torch"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5980ce40-0be1-48c1-941a-92c484d4da31",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "from transformers import AutoTokenizer\n",
+ "from optimum.intel import OVModelForCausalLM, OVWeightQuantizationConfig"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "48b81857-a095-43a3-8c8d-4c880b743a6e",
+ "metadata": {},
+ "source": [
+ "## Configuration\n",
+ "Here we will configure which model to load and other attributes. We will explain everything 😄\n",
+ "* `model_name`: the name or path of the model we want to export and quantize; it can be either on the 🤗 Hub or a local directory on your laptop.\n",
+ "* `save_name`: directory where the exported & quantized model will be saved.\n",
+ "* `precision`: the compute data type we will use for inference of the model, either `f32` or `f16`. We use FP32 precision due to Phi-2 overflow issues in FP16.\n",
+ "* `quantization_config`: here we set the attributes of the weight-only quantization algorithm:\n",
+ "  * `bits`: number of bits to use for quantization, either `8` or `4`.\n",
+ "  * `sym`: whether to use symmetric quantization or not, either `True` or `False`.\n",
+ "  * `group_size`: number of weights to group together for quantization. We use groups of 128 to ensure no accuracy degradation.\n",
+ "  * `ratio`: the fraction of the model to quantize to `bits`. The rest will be quantized to the default bit width, `8`.\n",
+ "* `device`: the device to use for inference, either `cpu` or `gpu`.\n",
+ "* `stateful`: optimize the model by keeping the KV cache as part of the model's state instead of as an input.\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "800cd7a3-a21d-4a0a-9d73-2a2d08646f99",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model_name = 'microsoft/phi-2'\n",
+ "save_name = './phi-2-woq4'\n",
+ "precision = 'f32'\n",
+ "quantization_config = OVWeightQuantizationConfig(\n",
+ "    bits=4,\n",
+ "    sym=False,\n",
+ "    group_size=128,\n",
+ "    ratio=0.8,\n",
+ ")\n",
+ "device = 'gpu'"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "1f398868-93d7-4c2d-9591-9bac8e9b701c",
+ "metadata": {},
+ "source": [
+ "With this configuration we expect the model size to shrink to around 1.62GB: $0.8 \\times 2.7{\\times}10^9 \\times \\frac{1}{2}\\text{B} + 0.2 \\times 2.7{\\times}10^9 \\times 1\\text{B} = 1.62{\\times}10^9\\text{B} = 1.62\\text{GB}$"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d994997d-344c-4d6c-ab08-f78ecb7f56ec",
+ "metadata": {},
+ "source": [
+ "## Export & quantize\n",
+ "OpenVINO together with 🤗 Optimum enables you to load, export and quantize a model in a single `from_pretrained` call, making the process as simple as possible.\n",
+ "Then, we will save the exported & quantized model locally on our laptop. If the model was already exported and saved before, we will load the locally saved model instead.\n",
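+ "\n",
+ "As a rough sanity check, the expected 4-bit size can be recomputed with the short back-of-the-envelope sketch below (this ignores the small extra overhead of quantization scales and zero points, so the real file will be slightly larger):\n",
+ "```python\n",
+ "params = 2.7e9  # Phi-2 parameter count\n",
+ "ratio = 0.8     # fraction of weights stored in 4 bit (0.5 byte each), the rest in 8 bit (1 byte each)\n",
+ "est_gb = (ratio * params * 0.5 + (1 - ratio) * params * 1.0) / 1e9\n",
+ "print(f'Estimated compressed size: {est_gb:.2f} GB')  # ~1.62 GB\n",
+ "```"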
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "03a308c6-27e7-4926-8ac4-4fa0c1ca68d2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Load kwargs\n",
+ "load_kwargs = {\n",
+ "    'device': device,\n",
+ "    'ov_config': {\n",
+ "        \"PERFORMANCE_HINT\": \"LATENCY\",\n",
+ "        \"INFERENCE_PRECISION_HINT\": precision,\n",
+ "        \"CACHE_DIR\": os.path.join(save_name, \"model_cache\"),  # OpenVINO will use this directory as cache\n",
+ "    },\n",
+ "    'compile': False,\n",
+ "    'quantization_config': quantization_config\n",
+ "}\n",
+ "\n",
+ "# Check whether the model was already exported\n",
+ "saved = os.path.exists(save_name)\n",
+ "\n",
+ "model = OVModelForCausalLM.from_pretrained(\n",
+ "    model_name if not saved else save_name,\n",
+ "    export=not saved,\n",
+ "    **load_kwargs,\n",
+ ")\n",
+ "\n",
+ "# Load tokenizer to be used with the model\n",
+ "tokenizer = AutoTokenizer.from_pretrained(model_name if not saved else save_name)\n",
+ "\n",
+ "# Save the exported model locally\n",
+ "if not saved:\n",
+ "    model.save_pretrained(save_name)\n",
+ "    tokenizer.save_pretrained(save_name)\n",
+ "\n",
+ "# TODO Optional: export to huggingface/hub\n",
+ "\n",
+ "model_size = os.stat(os.path.join(save_name, 'openvino_model.bin')).st_size / 1024 ** 3\n",
+ "print(f'Model size in FP32: ~5.4GB, current model size in 4bit: {model_size:.2f}GB')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "592e118d-e8bb-491f-92b2-d0418e19158c",
+ "metadata": {},
+ "source": [
+ "We can see the model size was reduced to around 1.7GB, close to our estimate. After loading the model, we can switch it between devices using `model.to('gpu')` for example.\n",
+ "Once we have finished configuring everything, we can compile the model by calling `model.compile()`, and the model will be ready for use."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3cef4dc0-191e-4755-a639-c3e8adbd18a2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model.compile()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "dd3c467e-3bbb-4265-9075-1c6688af2f92",
+ "metadata": {},
+ "source": [
+ "## Generate using the exported model\n",
+ "We will now show an example where we use our quantized Phi-2 to generate code in Python.\n",
+ "Phi-2 knows how to do code completions: given a function's signature and its docstring, the model generates the implementation of the function.\n",
+ "\n",
+ "In our example we have taken one of the samples from the test set of the HumanEval dataset.\n",
+ "HumanEval is a code completion dataset used to train and benchmark models on code completion in Python.\n",
\n", + "Phi-2 has scored a remarkable result on the HumanEval dataset and is an excellent model to use for code completions.\n", + "\n", + "Note: the first time you run the model might take more time due to loading and compilation overheads of the first inference" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4b4ea738-7db5-490e-9338-d6420b77796c", + "metadata": {}, + "outputs": [], + "source": [ + "sample = \"\"\"from typing import List\n", + "\n", + "\n", + "def has_close_elements(numbers: List[float], threshold: float) -> bool:\n", + " \\\"\\\"\\\" Check if in given list of numbers, are any two numbers closer to each other than\n", + " given threshold.\n", + " >>> has_close_elements([1.0, 2.0, 3.0], 0.5)\n", + " False\n", + " >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)\n", + " True\n", + " \\\"\\\"\\\"\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14ffe7f9-7d93-4a49-95d8-5f2a4e400cfe", + "metadata": {}, + "outputs": [], + "source": [ + "from transformers import TextStreamer\n", + "\n", + "# Tokenize the sample\n", + "inputs = tokenizer([sample], return_tensors='pt')\n", + "\n", + "# Call generate on the inputs\n", + "out = model.generate(\n", + " **inputs,\n", + " max_new_tokens=128,\n", + " streamer=TextStreamer(tokenizer=tokenizer, skip_special_tokens=True),\n", + " pad_token_id=tokenizer.eos_token_id,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "3f8aa25c-de59-4e79-9a1f-c03ec76d206a", + "metadata": {}, + "source": [ + "## Chatbot demo\n", + "We will continue to build a chatbot demo running with Gradio using the model we just exported and quantized.\n", + "The chatbot will be rather simple where the user will input a message and the model will reply to the user by generating text using the entire chat history as the input to the model.\n", + "\n", + "A lot of models that were trained for the chatbot use case have been trained with special tokens to tell the model who is the current speaker and with a special system message. \n", + "Phi-2 wasn't trained specifically for the chatbot use case and doesn't have any special tokens either, however, it has seen chats in the training data and therefore is suited for that use case.\n", + "\n", + "The chat template we will use is rather simple:\n", + "```\n", + "User: \n", + "Assistant: \n", + "User: \n", + "...\n", + "```\n", + "\n", + "We will start by writing the core function of the chatbot that receives the entire history of the chat and generates the assistant's response.\n", + "To support this core function we will build a few assistant functions to prepare the input for the model and to stop generation in time." 
+  {
+   "cell_type": "markdown",
+   "id": "chatbot-core-functions",
+   "metadata": {},
+   "source": [
+    "We will start by writing the core function of the chatbot, which receives the entire history of the chat and generates the assistant's response.\n",
+    "To support this core function we will build a few helper functions to prepare the input for the model and to stop generation in time."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7e81d125-ff47-4122-853d-11a2763db146",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import time\n",
+    "from threading import Thread\n",
+    "\n",
+    "from transformers import (\n",
+    "    TextIteratorStreamer,\n",
+    "    StoppingCriteria,\n",
+    "    StoppingCriteriaList,\n",
+    "    GenerationConfig,\n",
+    ")\n",
+    "\n",
+    "\n",
+    "# Copied and modified from https://github.com/bigcode-project/bigcode-evaluation-harness/blob/main/bigcode_eval/generation.py#L13\n",
+    "class SuffixCriteria(StoppingCriteria):\n",
+    "    def __init__(self, start_length, eof_strings, tokenizer, check_fn=None):\n",
+    "        self.start_length = start_length\n",
+    "        self.eof_strings = eof_strings\n",
+    "        self.tokenizer = tokenizer\n",
+    "        if check_fn is None:\n",
+    "            check_fn = lambda decoded_generation: any(\n",
+    "                [decoded_generation.endswith(stop_string) for stop_string in self.eof_strings]\n",
+    "            )\n",
+    "        self.check_fn = check_fn\n",
+    "\n",
+    "    def __call__(self, input_ids, scores, **kwargs):\n",
+    "        \"\"\"Returns True if all generated sequences end with any of the stop strings\"\"\"\n",
+    "        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])\n",
+    "        return all([self.check_fn(decoded_generation) for decoded_generation in decoded_generations])\n",
+    "\n",
+    "\n",
+    "def is_partial_stop(output, stop_str):\n",
+    "    \"\"\"Check whether the output contains a partial stop str.\"\"\"\n",
+    "    for i in range(0, min(len(output), len(stop_str))):\n",
+    "        if stop_str.startswith(output[-i:]):\n",
+    "            return True\n",
+    "    return False\n",
+    "\n",
+    "\n",
+    "# Set the chat template to the tokenizer. The chat template implements the simple template of\n",
+    "# User: content\n",
+    "# Assistant: content\n",
+    "# ...\n",
+    "# Read more about chat templates here https://huggingface.co/docs/transformers/main/en/chat_templating\n",
+    "tokenizer.chat_template = \"{% for message in messages %}{{message['role'] + ': ' + message['content'] + '\\\\n'}}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}\"\n",
+    "\n",
+    "\n",
+    "def prepare_history_for_model(history):\n",
+    "    \"\"\"\n",
+    "    Converts the history to a tokenized prompt in the format expected by the model.\n",
+    "    Params:\n",
+    "      history: dialogue history\n",
+    "    Returns:\n",
+    "      Tokenized prompt\n",
+    "    \"\"\"\n",
+    "    messages = []\n",
+    "    for idx, (user_msg, model_msg) in enumerate(history):\n",
+    "        # Skip the last assistant message if it's empty; the tokenizer will do the formatting\n",
+    "        if idx == len(history) - 1 and not model_msg:\n",
+    "            messages.append({'role': 'User', 'content': user_msg})\n",
+    "            break\n",
+    "        if user_msg:\n",
+    "            messages.append({'role': 'User', 'content': user_msg})\n",
+    "        if model_msg:\n",
+    "            messages.append({'role': 'Assistant', 'content': model_msg})\n",
+    "    input_token = tokenizer.apply_chat_template(\n",
+    "        messages,\n",
+    "        add_generation_prompt=True,\n",
+    "        tokenize=True,\n",
+    "        return_tensors=\"pt\",\n",
+    "        return_dict=True\n",
+    "    )\n",
+    "    return input_token\n",
+    "\n",
+    "\n",
+    "def generate(history, temperature, max_new_tokens, top_p, repetition_penalty):\n",
+    "    \"\"\"\n",
+    "    Generates the assistant's response given the chatbot history and generation parameters\n",
+    "\n",
+    "    Params:\n",
+    "      history: conversation history formatted in pairs of user and assistant messages `[user_message, assistant_message]`\n",
+    "      temperature: parameter to control the level of creativity in AI-generated text.\n",
+    "        By adjusting the `temperature`, you can influence the AI model's probability distribution, making the text more focused or diverse.\n",
+    "      max_new_tokens: the maximum number of tokens we allow the model to generate as a response.\n",
+    "      top_p: parameter to control the range of tokens considered by the AI model based on their cumulative probability.\n",
+    "      repetition_penalty: parameter to penalize tokens based on how frequently they occur in the text.\n",
+    "    Yields:\n",
+    "      Updated history and generation status.\n",
+    "    \"\"\"\n",
+    "    start = time.perf_counter()\n",
+    "    # Construct and tokenize the model input from the conversation history\n",
+    "    inputs = prepare_history_for_model(history)\n",
+    "    input_length = inputs['input_ids'].shape[1]\n",
+    "    # Truncate the input in case it is too long.\n",
+    "    # TODO improve this\n",
+    "    if input_length > 2000:\n",
+    "        history = [history[-1]]\n",
+    "        inputs = prepare_history_for_model(history)\n",
+    "        input_length = inputs['input_ids'].shape[1]\n",
+    "\n",
+    "    prompt_char = '▌'\n",
+    "    history[-1][1] = prompt_char\n",
+    "    yield (history, 'Status: Generating...')\n",
+    "\n",
+    "    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)\n",
+    "\n",
+    "    # Create a stopping criterion to prevent the model from playing the role of the user as well.\n",
+    "    stop_str = '\\\\nUser:'\n",
+    "    stopping_criteria = StoppingCriteriaList([SuffixCriteria(input_length, [stop_str], tokenizer)])\n",
+    "    # Prepare input for generate\n",
+    "    generation_config = GenerationConfig(\n",
+    "        max_new_tokens=max_new_tokens,\n",
+    "        do_sample=temperature > 0.0,\n",
+    "        temperature=temperature if temperature > 0.0 else 1.0,\n",
+    "        repetition_penalty=repetition_penalty,\n",
+    "        top_p=top_p,\n",
+    "        eos_token_id=[tokenizer.eos_token_id],\n",
+    "        pad_token_id=tokenizer.eos_token_id,\n",
+    "    )\n",
+    "    generate_kwargs = dict(\n",
+    "        streamer=streamer,\n",
+    "        generation_config=generation_config,\n",
+    "        stopping_criteria=stopping_criteria,\n",
+    "    ) | inputs\n",
+    "\n",
+    "    t1 = Thread(target=model.generate, kwargs=generate_kwargs)\n",
+    "    t1.start()\n",
+    "\n",
+    "    # Initialize an empty string to store the generated text.\n",
+    "    partial_text = \"\"\n",
+    "    for new_text in streamer:\n",
+    "        partial_text += new_text\n",
+    "        history[-1][1] = partial_text + prompt_char\n",
+    "        # We don't yield the generated text until we are sure it is not the stop string\n",
+    "        pos = partial_text.rfind(stop_str)\n",
+    "        if pos != -1:\n",
+    "            partial_text = partial_text[:pos]\n",
+    "            break\n",
+    "        elif is_partial_stop(partial_text, stop_str):\n",
+    "            continue\n",
+    "        yield (history, 'Status: Generating...')\n",
+    "    history[-1][1] = partial_text\n",
+    "    generation_time = time.perf_counter() - start\n",
+    "    yield (history, f'Generation time: {generation_time:.2f} sec')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "29fe1ae5-9929-4789-9293-612b2062e2a8",
+   "metadata": {},
+   "source": [
+    "Next we will create the actual demo using Gradio. The layout will be very simple: a chatbot window, followed by a text prompt and some controls.\n",
+    "We will also include sliders to adjust generation parameters such as the temperature and the length of the response we allow the model to generate.\n",
+    "\n",
+    "To install the Gradio dependency, please uncomment the following cell and run it."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b61a9a9f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# !pip install gradio"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9ae1aa4e-3539-49a1-8f32-62b818ee1002",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import gradio as gr\n",
+    "\n",
+    "\n",
+    "EXAMPLES = [\n",
+    "    [\"What is OpenVINO?\"],\n",
+    "    [\"Can you briefly explain what the Python programming language is?\"],\n",
+    "    [\"Explain the plot of Cinderella in a sentence.\"],\n",
+    "    [\"Write a Python function to perform binary search over a sorted list. Use markdown to write code\"],\n",
+    "    [\"Lily has a rubber ball that she drops from the top of a wall. The wall is 2 meters tall. How long will it take for the ball to reach the ground?\"],\n",
+    "]\n",
+    "\n",
+    "\n",
+    "def add_user_text(message, history):\n",
+    "    \"\"\"\n",
+    "    Add user's message to chatbot history\n",
+    "\n",
+    "    Params:\n",
+    "      message: current user message\n",
+    "      history: conversation history\n",
+    "    Returns:\n",
+    "      Updated history, cleared user message and status\n",
+    "    \"\"\"\n",
+    "    # Append current user message to history with a blank assistant message which will be generated by the model\n",
+    "    history.append([message, None])\n",
+    "    return ('', history)\n",
+    "\n",
+    "\n",
+    "with gr.Blocks(theme=gr.themes.Soft()) as demo:\n",
+    "    gr.Markdown('<center><h2>Chat with Phi-2 on Meteor Lake iGPU</h2></center>')\n",
')\n", + " chatbot = gr.Chatbot()\n", + " with gr.Row():\n", + " msg = gr.Textbox(placeholder=\"Enter message here...\", show_label=False, autofocus=True, scale=75)\n", + " status = gr.Textbox(\"Status: Idle\", show_label=False, max_lines=1, scale=25)\n", + " with gr.Row():\n", + " submit = gr.Button(\"Submit\", variant='primary')\n", + " clear = gr.Button(\"Clear\")\n", + " with gr.Accordion(\"Advanced Options:\", open=False):\n", + " with gr.Row():\n", + " with gr.Column():\n", + " temperature = gr.Slider(\n", + " label=\"Temperature\",\n", + " value=0.0,\n", + " minimum=0.0,\n", + " maximum=1.0,\n", + " step=0.05,\n", + " interactive=True,\n", + " )\n", + " max_new_tokens = gr.Slider(\n", + " label=\"Max new tokens\",\n", + " value=128,\n", + " minimum=0,\n", + " maximum=512,\n", + " step=32,\n", + " interactive=True,\n", + " )\n", + " with gr.Column():\n", + " top_p = gr.Slider(\n", + " label=\"Top-p (nucleus sampling)\",\n", + " value=1.0,\n", + " minimum=0.0,\n", + " maximum=1.0,\n", + " step=0.05,\n", + " interactive=True,\n", + " )\n", + " repetition_penalty = gr.Slider(\n", + " label=\"Repetition penalty\",\n", + " value=1.0,\n", + " minimum=1.0,\n", + " maximum=2.0,\n", + " step=0.1,\n", + " interactive=True,\n", + " )\n", + " gr.Examples(\n", + " EXAMPLES, inputs=msg, label=\"Click on any example and press the 'Submit' button\"\n", + " )\n", + "\n", + " # Sets generate function to be triggered when the user submit a new message\n", + " gr.on(\n", + " triggers=[submit.click, msg.submit],\n", + " fn=add_user_text,\n", + " inputs=[msg, chatbot],\n", + " outputs=[msg, chatbot],\n", + " queue=False,\n", + " ).then(\n", + " fn=generate,\n", + " inputs=[chatbot, temperature, max_new_tokens, top_p, repetition_penalty],\n", + " outputs=[chatbot, status],\n", + " concurrency_limit=1,\n", + " queue=True\n", + " )\n", + " \n", + " clear.click(fn=lambda: (None, 'Status: Idle'), inputs=None, outputs=[chatbot, status], queue=False)" + ] + }, + { + "cell_type": "markdown", + "id": "1d1baf09-26f1-40ab-896c-3468b5e89fec", + "metadata": {}, + "source": [ + "That's it, all that is left is to start the demo!\n", + "\n", + "When you're done you can use `demo.close()` to close the demo" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5b73962d-f977-45b7-be3a-32b65e546737", + "metadata": {}, + "outputs": [], + "source": [ + "demo.launch()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1e26a0bc-6a78-4185-8b0c-7e9450ba5868", + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "# demo.close()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}